-rw-r--r--Documentation/networking/LICENSE.qlcnic327
-rw-r--r--Documentation/networking/dccp.txt20
-rw-r--r--Documentation/networking/ip-sysctl.txt24
-rw-r--r--Documentation/networking/stmmac.txt48
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c32
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/lanai.c7
-rw-r--r--drivers/block/aoe/aoecmd.c6
-rw-r--r--drivers/infiniband/core/addr.c14
-rw-r--r--drivers/infiniband/hw/mlx4/main.c6
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c4
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c2
-rw-r--r--drivers/isdn/hisax/avm_pci.c2
-rw-r--r--drivers/isdn/hisax/callc.c4
-rw-r--r--drivers/isdn/hisax/config.c2
-rw-r--r--drivers/isdn/hisax/hfc_2bds0.c4
-rw-r--r--drivers/isdn/hisax/hfc_2bs0.c2
-rw-r--r--drivers/isdn/hisax/hfc_pci.c4
-rw-r--r--drivers/isdn/hisax/hfc_sx.c6
-rw-r--r--drivers/isdn/hisax/hisax.h2
-rw-r--r--drivers/isdn/hisax/ipacx.c2
-rw-r--r--drivers/isdn/hisax/isar.c6
-rw-r--r--drivers/isdn/hisax/isdnl1.h1
-rw-r--r--drivers/isdn/hisax/isdnl3.c2
-rw-r--r--drivers/isdn/hisax/netjet.c10
-rw-r--r--drivers/isdn/hisax/st5481_d.c6
-rw-r--r--drivers/isdn/i4l/isdn_concap.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.c20
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c26
-rw-r--r--drivers/isdn/mISDN/layer1.c10
-rw-r--r--drivers/isdn/mISDN/layer2.c12
-rw-r--r--drivers/isdn/mISDN/tei.c23
-rw-r--r--drivers/net/3c507.c2
-rw-r--r--drivers/net/3c515.c2
-rw-r--r--drivers/net/82596.c2
-rw-r--r--drivers/net/Kconfig255
-rw-r--r--drivers/net/arm/am79c961a.c9
-rw-r--r--drivers/net/arm/w90p910_ether.c2
-rw-r--r--drivers/net/at1700.c2
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/ax88796.c8
-rw-r--r--drivers/net/benet/be.h39
-rw-r--r--drivers/net/benet/be_cmds.c69
-rw-r--r--drivers/net/benet/be_cmds.h42
-rw-r--r--drivers/net/benet/be_hw.h39
-rw-r--r--drivers/net/benet/be_main.c242
-rw-r--r--drivers/net/bnx2.c54
-rw-r--r--drivers/net/bnx2.h2
-rw-r--r--drivers/net/bnx2x/bnx2x.h11
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c43
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h10
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c67
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h42
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c4
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c354
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h5
-rw-r--r--drivers/net/bonding/bond_3ad.c3
-rw-r--r--drivers/net/bonding/bond_main.c18
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/can/Kconfig21
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/mscan/mscan.c2
-rw-r--r--drivers/net/can/pch_can.c939
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c8
-rw-r--r--drivers/net/can/slcan.c755
-rw-r--r--drivers/net/cnic.c2
-rw-r--r--drivers/net/cris/eth_v10.c34
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c3
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c6
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c6
-rw-r--r--drivers/net/cxgb4vf/adapter.h2
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c32
-rw-r--r--drivers/net/cxgb4vf/sge.c9
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c5
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/e1000/e1000_main.c6
-rw-r--r--drivers/net/e1000e/82571.c143
-rw-r--r--drivers/net/e1000e/defines.h1
-rw-r--r--drivers/net/e1000e/netdev.c57
-rw-r--r--drivers/net/eepro.c2
-rw-r--r--drivers/net/ehea/ehea_main.c4
-rw-r--r--drivers/net/enic/enic.h2
-rw-r--r--drivers/net/enic/enic_main.c4
-rw-r--r--drivers/net/ethoc.c160
-rw-r--r--drivers/net/fec_mpc52xx.c19
-rw-r--r--drivers/net/forcedeth.c1134
-rw-r--r--drivers/net/igb/igb_main.c6
-rw-r--r--drivers/net/igbvf/Makefile2
-rw-r--r--drivers/net/igbvf/defines.h2
-rw-r--r--drivers/net/igbvf/ethtool.c2
-rw-r--r--drivers/net/igbvf/igbvf.h3
-rw-r--r--drivers/net/igbvf/mbx.c2
-rw-r--r--drivers/net/igbvf/mbx.h2
-rw-r--r--drivers/net/igbvf/netdev.c17
-rw-r--r--drivers/net/igbvf/regs.h2
-rw-r--r--drivers/net/igbvf/vf.c2
-rw-r--r--drivers/net/igbvf/vf.h2
-rw-r--r--drivers/net/iseries_veth.c2
-rw-r--r--drivers/net/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ixgbe/Makefile2
-rw-r--r--drivers/net/ixgbe/ixgbe.h122
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c58
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c136
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c192
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c17
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c12
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c12
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c55
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c266
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c15
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c2048
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c40
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c52
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h5
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c5
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h32
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c722
-rw-r--r--drivers/net/ixgbevf/Makefile2
-rw-r--r--drivers/net/ixgbevf/defines.h2
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c19
-rw-r--r--drivers/net/ixgbevf/mbx.c2
-rw-r--r--drivers/net/ixgbevf/mbx.h2
-rw-r--r--drivers/net/ixgbevf/regs.h2
-rw-r--r--drivers/net/ixgbevf/vf.c2
-rw-r--r--drivers/net/ixgbevf/vf.h2
-rw-r--r--drivers/net/jme.c16
-rw-r--r--drivers/net/ks8851.c33
-rw-r--r--drivers/net/lance.c2
-rw-r--r--drivers/net/lib82596.c2
-rw-r--r--drivers/net/lib8390.c24
-rw-r--r--drivers/net/macvlan.c113
-rw-r--r--drivers/net/ne-h8300.c12
-rw-r--r--drivers/net/netxen/netxen_nic_init.c7
-rw-r--r--drivers/net/netxen/netxen_nic_main.c6
-rw-r--r--drivers/net/pch_gbe/pch_gbe_ethtool.c19
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c12
-rw-r--r--drivers/net/pcmcia/axnet_cs.c18
-rw-r--r--drivers/net/phy/phy.c4
-rw-r--r--drivers/net/ppp_generic.c12
-rw-r--r--drivers/net/pptp.c3
-rw-r--r--drivers/net/qla3xxx.c8
-rw-r--r--drivers/net/qlcnic/qlcnic.h42
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c28
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c130
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h25
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c76
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c123
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c92
-rw-r--r--drivers/net/qlge/qlge.h4
-rw-r--r--drivers/net/qlge/qlge_dbg.c21
-rw-r--r--drivers/net/qlge/qlge_ethtool.c19
-rw-r--r--drivers/net/qlge/qlge_main.c8
-rw-r--r--drivers/net/qlge/qlge_mpi.c2
-rw-r--r--drivers/net/s2io.c3
-rw-r--r--drivers/net/sc92031.c3
-rw-r--r--drivers/net/sfc/efx.c14
-rw-r--r--drivers/net/sfc/ethtool.c60
-rw-r--r--drivers/net/sfc/falcon.c183
-rw-r--r--drivers/net/sfc/falcon_boards.c120
-rw-r--r--drivers/net/sfc/falcon_xmac.c14
-rw-r--r--drivers/net/sfc/filter.c3
-rw-r--r--drivers/net/sfc/mcdi.c3
-rw-r--r--drivers/net/sfc/mcdi_phy.c1
-rw-r--r--drivers/net/sfc/mdio_10g.c1
-rw-r--r--drivers/net/sfc/mtd.c98
-rw-r--r--drivers/net/sfc/net_driver.h17
-rw-r--r--drivers/net/sfc/nic.c48
-rw-r--r--drivers/net/sfc/nic.h12
-rw-r--r--drivers/net/sfc/qt202x_phy.c6
-rw-r--r--drivers/net/sfc/siena.c10
-rw-r--r--drivers/net/sfc/spi.h5
-rw-r--r--drivers/net/sfc/tenxpress.c2
-rw-r--r--drivers/net/sfc/tx.c8
-rw-r--r--drivers/net/sh_eth.c244
-rw-r--r--drivers/net/stmmac/stmmac.h40
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c16
-rw-r--r--drivers/net/stmmac/stmmac_main.c223
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c8
-rw-r--r--drivers/net/tg3.c283
-rw-r--r--drivers/net/tg3.h42
-rw-r--r--drivers/net/usb/Kconfig19
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/cdc_ncm.c1213
-rw-r--r--drivers/net/usb/hso.c1
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/usbnet.c45
-rw-r--r--drivers/net/via-rhine.c326
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c951
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c174
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h73
-rw-r--r--drivers/net/vxge/vxge-config.c1262
-rw-r--r--drivers/net/vxge/vxge-config.h135
-rw-r--r--drivers/net/vxge/vxge-ethtool.c112
-rw-r--r--drivers/net/vxge/vxge-main.c580
-rw-r--r--drivers/net/vxge/vxge-main.h81
-rw-r--r--drivers/net/vxge/vxge-reg.h33
-rw-r--r--drivers/net/vxge/vxge-traffic.h28
-rw-r--r--drivers/net/vxge/vxge-version.h31
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/ath.h6
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c38
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h26
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c74
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c32
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h18
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c93
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c123
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2654
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h27
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c95
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c65
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h56
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c79
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c61
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c70
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h57
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c91
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c94
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c197
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c82
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c60
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c715
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h5
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h13
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h7
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c56
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c19
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c17
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.h24
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c80
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h6
-rw-r--r--drivers/net/wireless/ath/debug.c9
-rw-r--r--drivers/net/wireless/ath/debug.h2
-rw-r--r--drivers/net/wireless/ath/key.c9
-rw-r--r--drivers/net/wireless/atmel.c6
-rw-r--r--drivers/net/wireless/b43/b43.h13
-rw-r--r--drivers/net/wireless/b43/dma.c5
-rw-r--r--drivers/net/wireless/b43/main.c48
-rw-r--r--drivers/net/wireless/b43/phy_common.c8
-rw-r--r--drivers/net/wireless/b43/phy_n.c100
-rw-r--r--drivers/net/wireless/b43/radio_2055.c256
-rw-r--r--drivers/net/wireless/b43/radio_2056.c6019
-rw-r--r--drivers/net/wireless/b43/radio_2056.h1081
-rw-r--r--drivers/net/wireless/b43/rfkill.c19
-rw-r--r--drivers/net/wireless/b43legacy/main.c47
-rw-r--r--drivers/net/wireless/b43legacy/rfkill.c2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig3
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c365
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c92
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c230
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c80
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c632
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c110
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c65
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c504
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h41
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c843
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h81
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-legacy.c662
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-legacy.h79
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c190
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c51
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c62
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c60
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c2
-rw-r--r--drivers/net/wireless/libertas/cfg.c4
-rw-r--r--drivers/net/wireless/libertas/cmd.c8
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/libertas/if_usb.c13
-rw-r--r--drivers/net/wireless/libertas/main.c3
-rw-r--r--drivers/net/wireless/libertas/rx.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/mwl8k.c670
-rw-r--r--drivers/net/wireless/ray_cs.c4
-rw-r--r--drivers/net/wireless/rndis_wlan.c206
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig72
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c20
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h62
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c54
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c65
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c278
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c45
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c52
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c45
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h33
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c25
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c28
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c140
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_rtl8225.c22
-rw-r--r--drivers/net/wireless/wl1251/main.c15
-rw-r--r--drivers/net/wireless/wl1251/sdio.c101
-rw-r--r--drivers/net/wireless/wl1251/spi.c9
-rw-r--r--drivers/net/wireless/wl1251/wl1251.h1
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig54
-rw-r--r--drivers/net/wireless/wl12xx/Makefile17
-rw-r--r--drivers/net/wireless/wl12xx/acx.c (renamed from drivers/net/wireless/wl12xx/wl1271_acx.c)95
-rw-r--r--drivers/net/wireless/wl12xx/acx.h (renamed from drivers/net/wireless/wl12xx/wl1271_acx.h)99
-rw-r--r--drivers/net/wireless/wl12xx/boot.c (renamed from drivers/net/wireless/wl12xx/wl1271_boot.c)20
-rw-r--r--drivers/net/wireless/wl12xx/boot.h (renamed from drivers/net/wireless/wl12xx/wl1271_boot.h)2
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c (renamed from drivers/net/wireless/wl12xx/wl1271_cmd.c)12
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h (renamed from drivers/net/wireless/wl12xx/wl1271_cmd.h)54
-rw-r--r--drivers/net/wireless/wl12xx/conf.h (renamed from drivers/net/wireless/wl12xx/wl1271_conf.h)4
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c (renamed from drivers/net/wireless/wl12xx/wl1271_debugfs.c)225
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.h (renamed from drivers/net/wireless/wl12xx/wl1271_debugfs.h)6
-rw-r--r--drivers/net/wireless/wl12xx/event.c (renamed from drivers/net/wireless/wl12xx/wl1271_event.c)14
-rw-r--r--drivers/net/wireless/wl12xx/event.h (renamed from drivers/net/wireless/wl12xx/wl1271_event.h)4
-rw-r--r--drivers/net/wireless/wl12xx/ini.h (renamed from drivers/net/wireless/wl12xx/wl1271_ini.h)4
-rw-r--r--drivers/net/wireless/wl12xx/init.c (renamed from drivers/net/wireless/wl12xx/wl1271_init.c)10
-rw-r--r--drivers/net/wireless/wl12xx/init.h (renamed from drivers/net/wireless/wl12xx/wl1271_init.h)6
-rw-r--r--drivers/net/wireless/wl12xx/io.c (renamed from drivers/net/wireless/wl12xx/wl1271_io.c)4
-rw-r--r--drivers/net/wireless/wl12xx/io.h (renamed from drivers/net/wireless/wl12xx/wl1271_io.h)6
-rw-r--r--drivers/net/wireless/wl12xx/main.c (renamed from drivers/net/wireless/wl12xx/wl1271_main.c)391
-rw-r--r--drivers/net/wireless/wl12xx/ps.c (renamed from drivers/net/wireless/wl12xx/wl1271_ps.c)6
-rw-r--r--drivers/net/wireless/wl12xx/ps.h (renamed from drivers/net/wireless/wl12xx/wl1271_ps.h)8
-rw-r--r--drivers/net/wireless/wl12xx/reg.h (renamed from drivers/net/wireless/wl12xx/wl1271_reg.h)0
-rw-r--r--drivers/net/wireless/wl12xx/rx.c (renamed from drivers/net/wireless/wl12xx/wl1271_rx.c)38
-rw-r--r--drivers/net/wireless/wl12xx/rx.h (renamed from drivers/net/wireless/wl12xx/wl1271_rx.h)6
-rw-r--r--drivers/net/wireless/wl12xx/scan.c (renamed from drivers/net/wireless/wl12xx/wl1271_scan.c)13
-rw-r--r--drivers/net/wireless/wl12xx/scan.h (renamed from drivers/net/wireless/wl12xx/wl1271_scan.h)6
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c (renamed from drivers/net/wireless/wl12xx/wl1271_sdio.c)4
-rw-r--r--drivers/net/wireless/wl12xx/spi.c (renamed from drivers/net/wireless/wl12xx/wl1271_spi.c)6
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c (renamed from drivers/net/wireless/wl12xx/wl1271_testmode.c)18
-rw-r--r--drivers/net/wireless/wl12xx/testmode.h (renamed from drivers/net/wireless/wl12xx/wl1271_testmode.h)4
-rw-r--r--drivers/net/wireless/wl12xx/tx.c (renamed from drivers/net/wireless/wl12xx/wl1271_tx.c)142
-rw-r--r--drivers/net/wireless/wl12xx/tx.h (renamed from drivers/net/wireless/wl12xx/wl1271_tx.h)7
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h (renamed from drivers/net/wireless/wl12xx/wl1271.h)21
-rw-r--r--drivers/net/wireless/zd1201.c1
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xilinx_emaclite.c36
-rw-r--r--drivers/net/znet.c2
-rw-r--r--drivers/s390/net/lcs.c10
-rw-r--r--drivers/s390/net/qeth_core_sys.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c11
-rw-r--r--drivers/s390/net/qeth_l3_main.c23
-rw-r--r--drivers/ssb/pci.c52
-rw-r--r--drivers/ssb/pcihost_wrapper.c7
-rw-r--r--include/linux/average.h32
-rw-r--r--include/linux/bitops.h11
-rw-r--r--include/linux/dccp.h23
-rw-r--r--include/linux/filter.h56
-rw-r--r--include/linux/if_bridge.h4
-rw-r--r--include/linux/if_link.h28
-rw-r--r--include/linux/if_macvlan.h34
-rw-r--r--include/linux/igmp.h14
-rw-r--r--include/linux/inetdevice.h15
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/mdio.h5
-rw-r--r--include/linux/netdevice.h77
-rw-r--r--include/linux/netfilter.h2
-rw-r--r--include/linux/nl80211.h38
-rw-r--r--include/linux/rfkill.h31
-rw-r--r--include/linux/skbuff.h3
-rw-r--r--include/linux/stmmac.h6
-rw-r--r--include/linux/usb/usbnet.h6
-rw-r--r--include/linux/wl12xx.h8
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/caif/cfctrl.h2
-rw-r--r--include/net/cfg80211.h32
-rw-r--r--include/net/dn_dev.h27
-rw-r--r--include/net/dn_route.h10
-rw-r--r--include/net/dst.h8
-rw-r--r--include/net/flow.h2
-rw-r--r--include/net/if_inet6.h3
-rw-r--r--include/net/inet6_connection_sock.h3
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/inet_sock.h2
-rw-r--r--include/net/inetpeer.h32
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/ip6_route.h13
-rw-r--r--include/net/mac80211.h50
-rw-r--r--include/net/ndisc.h3
-rw-r--r--include/net/neighbour.h10
-rw-r--r--include/net/netlink.h21
-rw-r--r--include/net/netns/generic.h2
-rw-r--r--include/net/regulatory.h7
-rw-r--r--include/net/route.h24
-rw-r--r--include/net/rtnetlink.h35
-rw-r--r--include/net/scm.h5
-rw-r--r--include/net/sctp/command.h3
-rw-r--r--include/net/sctp/constants.h14
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/net/snmp.h4
-rw-r--r--include/net/sock.h60
-rw-r--r--include/net/tcp.h9
-rw-r--r--include/net/timewait_sock.h8
-rw-r--r--include/net/tipc/tipc.h186
-rw-r--r--include/net/tipc/tipc_bearer.h138
-rw-r--r--include/net/tipc/tipc_msg.h207
-rw-r--r--include/net/tipc/tipc_port.h101
-rw-r--r--include/net/x25.h2
-rw-r--r--include/net/xfrm.h6
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Makefile2
-rw-r--r--lib/average.c57
-rw-r--r--lib/nlattr.c22
-rw-r--r--net/8021q/vlan.c13
-rw-r--r--net/8021q/vlan.h22
-rw-r--r--net/8021q/vlan_core.c4
-rw-r--r--net/8021q/vlan_dev.c197
-rw-r--r--net/8021q/vlan_netlink.c20
-rw-r--r--net/8021q/vlanproc.c5
-rw-r--r--net/9p/protocol.c33
-rw-r--r--net/Kconfig5
-rw-r--r--net/atm/br2684.c2
-rw-r--r--net/atm/clip.c3
-rw-r--r--net/atm/lec.c3
-rw-r--r--net/bluetooth/Makefile2
-rw-r--r--net/bridge/br.c4
-rw-r--r--net/bridge/br_fdb.c15
-rw-r--r--net/bridge/br_forward.c4
-rw-r--r--net/bridge/br_if.c7
-rw-r--r--net/bridge/br_input.c10
-rw-r--r--net/bridge/br_multicast.c78
-rw-r--r--net/bridge/br_netfilter.c22
-rw-r--r--net/bridge/br_netlink.c10
-rw-r--r--net/bridge/br_notify.c6
-rw-r--r--net/bridge/br_private.h21
-rw-r--r--net/bridge/br_stp_bpdu.c8
-rw-r--r--net/bridge/netfilter/ebtable_broute.c3
-rw-r--r--net/bridge/netfilter/ebtables.c11
-rw-r--r--net/caif/Makefile8
-rw-r--r--net/can/Makefile6
-rw-r--r--net/ceph/Makefile2
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c205
-rw-r--r--net/core/ethtool.c4
-rw-r--r--net/core/filter.c444
-rw-r--r--net/core/net-sysfs.c429
-rw-r--r--net/core/net-sysfs.h4
-rw-r--r--net/core/netpoll.c3
-rw-r--r--net/core/pktgen.c39
-rw-r--r--net/core/request_sock.c1
-rw-r--r--net/core/rtnetlink.c166
-rw-r--r--net/core/scm.c10
-rw-r--r--net/core/skbuff.c34
-rw-r--r--net/core/timestamping.c4
-rw-r--r--net/dccp/Makefile4
-rw-r--r--net/dccp/ackvec.c616
-rw-r--r--net/dccp/ackvec.h151
-rw-r--r--net/dccp/ccids/ccid2.c143
-rw-r--r--net/dccp/ccids/ccid2.h2
-rw-r--r--net/dccp/dccp.h24
-rw-r--r--net/dccp/input.c33
-rw-r--r--net/dccp/ipv4.c13
-rw-r--r--net/dccp/options.c100
-rw-r--r--net/dccp/output.c22
-rw-r--r--net/dccp/proto.c71
-rw-r--r--net/dccp/qpolicy.c137
-rw-r--r--net/decnet/af_decnet.c2
-rw-r--r--net/decnet/dn_dev.c100
-rw-r--r--net/decnet/dn_fib.c6
-rw-r--r--net/decnet/dn_neigh.c2
-rw-r--r--net/decnet/dn_route.c94
-rw-r--r--net/decnet/dn_rules.c2
-rw-r--r--net/dns_resolver/Makefile2
-rw-r--r--net/econet/Makefile2
-rw-r--r--net/ieee802154/af_ieee802154.c6
-rw-r--r--net/ipv4/af_inet.c18
-rw-r--r--net/ipv4/arp.c31
-rw-r--r--net/ipv4/devinet.c91
-rw-r--r--net/ipv4/fib_frontend.c28
-rw-r--r--net/ipv4/fib_semantics.c8
-rw-r--r--net/ipv4/icmp.c32
-rw-r--r--net/ipv4/igmp.c282
-rw-r--r--net/ipv4/inet_connection_sock.c15
-rw-r--r--net/ipv4/inetpeer.c167
-rw-r--r--net/ipv4/ip_fragment.c2
-rw-r--r--net/ipv4/ip_gre.c48
-rw-r--r--net/ipv4/ip_output.c25
-rw-r--r--net/ipv4/ipconfig.c32
-rw-r--r--net/ipv4/ipip.c21
-rw-r--r--net/ipv4/ipmr.c20
-rw-r--r--net/ipv4/netfilter.c8
-rw-r--r--net/ipv4/netfilter/Makefile6
-rw-r--r--net/ipv4/raw.c7
-rw-r--r--net/ipv4/route.c102
-rw-r--r--net/ipv4/syncookies.c15
-rw-r--r--net/ipv4/tcp.c16
-rw-r--r--net/ipv4/tcp_ipv4.c72
-rw-r--r--net/ipv4/tcp_minisocks.c63
-rw-r--r--net/ipv4/tcp_output.c12
-rw-r--r--net/ipv4/tcp_probe.c4
-rw-r--r--net/ipv4/udp.c16
-rw-r--r--net/ipv4/xfrm4_policy.c47
-rw-r--r--net/ipv6/addrconf.c114
-rw-r--r--net/ipv6/inet6_connection_sock.c54
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ip6mr.c4
-rw-r--r--net/ipv6/mcast.c77
-rw-r--r--net/ipv6/ndisc.c24
-rw-r--r--net/ipv6/netfilter.c6
-rw-r--r--net/ipv6/netfilter/Makefile4
-rw-r--r--net/ipv6/reassembly.c36
-rw-r--r--net/ipv6/route.c44
-rw-r--r--net/ipv6/sit.c14
-rw-r--r--net/ipv6/tcp_ipv6.c149
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/irda/ircomm/Makefile4
-rw-r--r--net/irda/irlan/Makefile2
-rw-r--r--net/irda/irnet/Makefile2
-rw-r--r--net/l2tp/l2tp_ip.c12
-rw-r--r--net/lapb/Makefile2
-rw-r--r--net/llc/af_llc.c6
-rw-r--r--net/mac80211/aes_ccm.c3
-rw-r--r--net/mac80211/aes_cmac.c3
-rw-r--r--net/mac80211/cfg.c26
-rw-r--r--net/mac80211/debugfs.c60
-rw-r--r--net/mac80211/debugfs.h2
-rw-r--r--net/mac80211/debugfs_key.c19
-rw-r--r--net/mac80211/debugfs_sta.c26
-rw-r--r--net/mac80211/driver-ops.h37
-rw-r--r--net/mac80211/driver-trace.h71
-rw-r--r--net/mac80211/ibss.c2
-rw-r--r--net/mac80211/ieee80211_i.h10
-rw-r--r--net/mac80211/key.c9
-rw-r--r--net/mac80211/mlme.c143
-rw-r--r--net/mac80211/rate.c18
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c19
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/mac80211/sta_info.c17
-rw-r--r--net/mac80211/sta_info.h3
-rw-r--r--net/mac80211/status.c26
-rw-r--r--net/mac80211/tx.c16
-rw-r--r--net/mac80211/util.c40
-rw-r--r--net/mac80211/wme.c11
-rw-r--r--net/netfilter/core.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c42
-rw-r--r--net/netfilter/xt_TEE.c12
-rw-r--r--net/packet/af_packet.c145
-rw-r--r--net/phonet/Makefile4
-rw-r--r--net/rds/Makefile8
-rw-r--r--net/rfkill/core.c14
-rw-r--r--net/rxrpc/Makefile4
-rw-r--r--net/rxrpc/ar-peer.c10
-rw-r--r--net/sched/sch_generic.c12
-rw-r--r--net/sched/sch_teql.c3
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/socket.c11
-rw-r--r--net/sunrpc/auth_gss/Makefile4
-rw-r--r--net/tipc/addr.c2
-rw-r--r--net/tipc/bcast.c8
-rw-r--r--net/tipc/bearer.c12
-rw-r--r--net/tipc/bearer.h71
-rw-r--r--net/tipc/cluster.c7
-rw-r--r--net/tipc/config.c16
-rw-r--r--net/tipc/config.h1
-rw-r--r--net/tipc/core.c41
-rw-r--r--net/tipc/core.h14
-rw-r--r--net/tipc/discover.c2
-rw-r--r--net/tipc/discover.h2
-rw-r--r--net/tipc/eth_media.c6
-rw-r--r--net/tipc/link.c14
-rw-r--r--net/tipc/link.h1
-rw-r--r--net/tipc/msg.c2
-rw-r--r--net/tipc/msg.h168
-rw-r--r--net/tipc/name_distr.c2
-rw-r--r--net/tipc/name_table.c5
-rw-r--r--net/tipc/net.c5
-rw-r--r--net/tipc/node.c7
-rw-r--r--net/tipc/node_subscr.c2
-rw-r--r--net/tipc/port.c115
-rw-r--r--net/tipc/port.h130
-rw-r--r--net/tipc/socket.c7
-rw-r--r--net/tipc/subscr.c8
-rw-r--r--net/tipc/user_reg.c50
-rw-r--r--net/tipc/user_reg.h3
-rw-r--r--net/tipc/zone.c3
-rw-r--r--net/unix/af_unix.c34
-rw-r--r--net/wanrouter/Makefile2
-rw-r--r--net/wireless/core.c8
-rw-r--r--net/wireless/lib80211.c8
-rw-r--r--net/wireless/lib80211_crypt_tkip.c16
-rw-r--r--net/wireless/mlme.c12
-rw-r--r--net/wireless/nl80211.c111
-rw-r--r--net/wireless/nl80211.h4
-rw-r--r--net/wireless/reg.c259
-rw-r--r--net/wireless/util.c11
-rw-r--r--net/wireless/wext-core.c10
-rw-r--r--net/x25/af_x25.c95
-rw-r--r--net/x25/x25_link.c8
-rw-r--r--security/selinux/hooks.c12
655 files changed, 32687 insertions, 13939 deletions
diff --git a/Documentation/networking/LICENSE.qlcnic b/Documentation/networking/LICENSE.qlcnic
new file mode 100644
index 00000000000..29ad4b10642
--- /dev/null
+++ b/Documentation/networking/LICENSE.qlcnic
@@ -0,0 +1,327 @@
1Copyright (c) 2009-2010 QLogic Corporation
2QLogic Linux qlcnic NIC Driver
3
4This program includes a device driver for Linux 2.6 that may be
5distributed with QLogic hardware specific firmware binary file.
6You may modify and redistribute the device driver code under the
7GNU General Public License (a copy of which is attached hereto as
8Exhibit A) published by the Free Software Foundation (version 2).
9
10You may redistribute the hardware specific firmware binary file
11under the following terms:
12
13 1. Redistribution of source code (only if applicable),
14 must retain the above copyright notice, this list of
15 conditions and the following disclaimer.
16
17 2. Redistribution in binary form must reproduce the above
18 copyright notice, this list of conditions and the
19 following disclaimer in the documentation and/or other
20 materials provided with the distribution.
21
22 3. The name of QLogic Corporation may not be used to
23 endorse or promote products derived from this software
24 without specific prior written permission
25
26REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
27THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
28EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
30PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
31BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
33TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
35ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
36OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38POSSIBILITY OF SUCH DAMAGE.
39
40USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
41CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
42OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
43TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
44ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
45COMBINATION WITH THIS PROGRAM.
46
47
48EXHIBIT A
49
50 GNU GENERAL PUBLIC LICENSE
51 Version 2, June 1991
52
53 Copyright (C) 1989, 1991 Free Software Foundation, Inc.
54 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
55 Everyone is permitted to copy and distribute verbatim copies
56 of this license document, but changing it is not allowed.
57
58 Preamble
59
60 The licenses for most software are designed to take away your
61freedom to share and change it. By contrast, the GNU General Public
62License is intended to guarantee your freedom to share and change free
63software--to make sure the software is free for all its users. This
64General Public License applies to most of the Free Software
65Foundation's software and to any other program whose authors commit to
66using it. (Some other Free Software Foundation software is covered by
67the GNU Lesser General Public License instead.) You can apply it to
68your programs, too.
69
70 When we speak of free software, we are referring to freedom, not
71price. Our General Public Licenses are designed to make sure that you
72have the freedom to distribute copies of free software (and charge for
73this service if you wish), that you receive source code or can get it
74if you want it, that you can change the software or use pieces of it
75in new free programs; and that you know you can do these things.
76
77 To protect your rights, we need to make restrictions that forbid
78anyone to deny you these rights or to ask you to surrender the rights.
79These restrictions translate to certain responsibilities for you if you
80distribute copies of the software, or if you modify it.
81
82 For example, if you distribute copies of such a program, whether
83gratis or for a fee, you must give the recipients all the rights that
84you have. You must make sure that they, too, receive or can get the
85source code. And you must show them these terms so they know their
86rights.
87
88 We protect your rights with two steps: (1) copyright the software, and
89(2) offer you this license which gives you legal permission to copy,
90distribute and/or modify the software.
91
92 Also, for each author's protection and ours, we want to make certain
93that everyone understands that there is no warranty for this free
94software. If the software is modified by someone else and passed on, we
95want its recipients to know that what they have is not the original, so
96that any problems introduced by others will not reflect on the original
97authors' reputations.
98
99 Finally, any free program is threatened constantly by software
100patents. We wish to avoid the danger that redistributors of a free
101program will individually obtain patent licenses, in effect making the
102program proprietary. To prevent this, we have made it clear that any
103patent must be licensed for everyone's free use or not licensed at all.
104
105 The precise terms and conditions for copying, distribution and
106modification follow.
107
108 GNU GENERAL PUBLIC LICENSE
109 TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
110
111 0. This License applies to any program or other work which contains
112a notice placed by the copyright holder saying it may be distributed
113under the terms of this General Public License. The "Program", below,
114refers to any such program or work, and a "work based on the Program"
115means either the Program or any derivative work under copyright law:
116that is to say, a work containing the Program or a portion of it,
117either verbatim or with modifications and/or translated into another
118language. (Hereinafter, translation is included without limitation in
119the term "modification".) Each licensee is addressed as "you".
120
121Activities other than copying, distribution and modification are not
122covered by this License; they are outside its scope. The act of
123running the Program is not restricted, and the output from the Program
124is covered only if its contents constitute a work based on the
125Program (independent of having been made by running the Program).
126Whether that is true depends on what the Program does.
127
128 1. You may copy and distribute verbatim copies of the Program's
129source code as you receive it, in any medium, provided that you
130conspicuously and appropriately publish on each copy an appropriate
131copyright notice and disclaimer of warranty; keep intact all the
132notices that refer to this License and to the absence of any warranty;
133and give any other recipients of the Program a copy of this License
134along with the Program.
135
136You may charge a fee for the physical act of transferring a copy, and
137you may at your option offer warranty protection in exchange for a fee.
138
139 2. You may modify your copy or copies of the Program or any portion
140of it, thus forming a work based on the Program, and copy and
141distribute such modifications or work under the terms of Section 1
142above, provided that you also meet all of these conditions:
143
144 a) You must cause the modified files to carry prominent notices
145 stating that you changed the files and the date of any change.
146
147 b) You must cause any work that you distribute or publish, that in
148 whole or in part contains or is derived from the Program or any
149 part thereof, to be licensed as a whole at no charge to all third
150 parties under the terms of this License.
151
152 c) If the modified program normally reads commands interactively
153 when run, you must cause it, when started running for such
154 interactive use in the most ordinary way, to print or display an
155 announcement including an appropriate copyright notice and a
156 notice that there is no warranty (or else, saying that you provide
157 a warranty) and that users may redistribute the program under
158 these conditions, and telling the user how to view a copy of this
159 License. (Exception: if the Program itself is interactive but
160 does not normally print such an announcement, your work based on
161 the Program is not required to print an announcement.)
162
163These requirements apply to the modified work as a whole. If
164identifiable sections of that work are not derived from the Program,
165and can be reasonably considered independent and separate works in
166themselves, then this License, and its terms, do not apply to those
167sections when you distribute them as separate works. But when you
168distribute the same sections as part of a whole which is a work based
169on the Program, the distribution of the whole must be on the terms of
170this License, whose permissions for other licensees extend to the
171entire whole, and thus to each and every part regardless of who wrote it.
172
173Thus, it is not the intent of this section to claim rights or contest
174your rights to work written entirely by you; rather, the intent is to
175exercise the right to control the distribution of derivative or
176collective works based on the Program.
177
178In addition, mere aggregation of another work not based on the Program
179with the Program (or with a work based on the Program) on a volume of
180a storage or distribution medium does not bring the other work under
181the scope of this License.
182
183 3. You may copy and distribute the Program (or a work based on it,
184under Section 2) in object code or executable form under the terms of
185Sections 1 and 2 above provided that you also do one of the following:
186
187 a) Accompany it with the complete corresponding machine-readable
188 source code, which must be distributed under the terms of Sections
189 1 and 2 above on a medium customarily used for software interchange; or,
190
191 b) Accompany it with a written offer, valid for at least three
192 years, to give any third party, for a charge no more than your
193 cost of physically performing source distribution, a complete
194 machine-readable copy of the corresponding source code, to be
195 distributed under the terms of Sections 1 and 2 above on a medium
196 customarily used for software interchange; or,
197
198 c) Accompany it with the information you received as to the offer
199 to distribute corresponding source code. (This alternative is
200 allowed only for noncommercial distribution and only if you
201 received the program in object code or executable form with such
202 an offer, in accord with Subsection b above.)
203
204The source code for a work means the preferred form of the work for
205making modifications to it. For an executable work, complete source
206code means all the source code for all modules it contains, plus any
207associated interface definition files, plus the scripts used to
208control compilation and installation of the executable. However, as a
209special exception, the source code distributed need not include
210anything that is normally distributed (in either source or binary
211form) with the major components (compiler, kernel, and so on) of the
212operating system on which the executable runs, unless that component
213itself accompanies the executable.
214
215If distribution of executable or object code is made by offering
216access to copy from a designated place, then offering equivalent
217access to copy the source code from the same place counts as
218distribution of the source code, even though third parties are not
219compelled to copy the source along with the object code.
220
221 4. You may not copy, modify, sublicense, or distribute the Program
222except as expressly provided under this License. Any attempt
223otherwise to copy, modify, sublicense or distribute the Program is
224void, and will automatically terminate your rights under this License.
225However, parties who have received copies, or rights, from you under
226this License will not have their licenses terminated so long as such
227parties remain in full compliance.
228
229 5. You are not required to accept this License, since you have not
230signed it. However, nothing else grants you permission to modify or
231distribute the Program or its derivative works. These actions are
232prohibited by law if you do not accept this License. Therefore, by
233modifying or distributing the Program (or any work based on the
234Program), you indicate your acceptance of this License to do so, and
235all its terms and conditions for copying, distributing or modifying
236the Program or works based on it.
237
238 6. Each time you redistribute the Program (or any work based on the
239Program), the recipient automatically receives a license from the
240original licensor to copy, distribute or modify the Program subject to
241these terms and conditions. You may not impose any further
242restrictions on the recipients' exercise of the rights granted herein.
243You are not responsible for enforcing compliance by third parties to
244this License.
245
246 7. If, as a consequence of a court judgment or allegation of patent
247infringement or for any other reason (not limited to patent issues),
248conditions are imposed on you (whether by court order, agreement or
249otherwise) that contradict the conditions of this License, they do not
250excuse you from the conditions of this License. If you cannot
251distribute so as to satisfy simultaneously your obligations under this
252License and any other pertinent obligations, then as a consequence you
253may not distribute the Program at all. For example, if a patent
254license would not permit royalty-free redistribution of the Program by
255all those who receive copies directly or indirectly through you, then
256the only way you could satisfy both it and this License would be to
257refrain entirely from distribution of the Program.
258
259If any portion of this section is held invalid or unenforceable under
260any particular circumstance, the balance of the section is intended to
261apply and the section as a whole is intended to apply in other
262circumstances.
263
264It is not the purpose of this section to induce you to infringe any
265patents or other property right claims or to contest validity of any
266such claims; this section has the sole purpose of protecting the
267integrity of the free software distribution system, which is
268implemented by public license practices. Many people have made
269generous contributions to the wide range of software distributed
270through that system in reliance on consistent application of that
271system; it is up to the author/donor to decide if he or she is willing
272to distribute software through any other system and a licensee cannot
273impose that choice.
274
275This section is intended to make thoroughly clear what is believed to
276be a consequence of the rest of this License.
277
278 8. If the distribution and/or use of the Program is restricted in
279certain countries either by patents or by copyrighted interfaces, the
280original copyright holder who places the Program under this License
281may add an explicit geographical distribution limitation excluding
282those countries, so that distribution is permitted only in or among
283countries not thus excluded. In such case, this License incorporates
284the limitation as if written in the body of this License.
285
286 9. The Free Software Foundation may publish revised and/or new versions
287of the General Public License from time to time. Such new versions will
288be similar in spirit to the present version, but may differ in detail to
289address new problems or concerns.
290
291Each version is given a distinguishing version number. If the Program
292specifies a version number of this License which applies to it and "any
293later version", you have the option of following the terms and conditions
294either of that version or of any later version published by the Free
295Software Foundation. If the Program does not specify a version number of
296this License, you may choose any version ever published by the Free Software
297Foundation.
298
299 10. If you wish to incorporate parts of the Program into other free
300programs whose distribution conditions are different, write to the author
301to ask for permission. For software which is copyrighted by the Free
302Software Foundation, write to the Free Software Foundation; we sometimes
303make exceptions for this. Our decision will be guided by the two goals
304of preserving the free status of all derivatives of our free software and
305of promoting the sharing and reuse of software generally.
306
307 NO WARRANTY
308
309 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
310FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
311OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
312PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
313OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
314MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
315TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
316PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
317REPAIR OR CORRECTION.
318
319 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
320WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
321REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
322INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
323OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
324TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
325YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
326PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
327POSSIBILITY OF SUCH DAMAGES.
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index 271d524a4c8..b395ca6a49f 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -47,6 +47,26 @@ http://linux-net.osdl.org/index.php/DCCP_Testing#Experimental_DCCP_source_tree
 
 Socket options
 ==============
+DCCP_SOCKOPT_QPOLICY_ID sets the dequeuing policy for outgoing packets. It takes
+a policy ID as argument and can only be set before the connection (i.e. changes
+during an established connection are not supported). Currently, two policies are
+defined: the "simple" policy (DCCPQ_POLICY_SIMPLE), which does nothing special,
+and a priority-based variant (DCCPQ_POLICY_PRIO). The latter allows to pass an
+u32 priority value as ancillary data to sendmsg(), where higher numbers indicate
+a higher packet priority (similar to SO_PRIORITY). This ancillary data needs to
+be formatted using a cmsg(3) message header filled in as follows:
+	cmsg->cmsg_level = SOL_DCCP;
+	cmsg->cmsg_type  = DCCP_SCM_PRIORITY;
+	cmsg->cmsg_len   = CMSG_LEN(sizeof(uint32_t));	/* or CMSG_LEN(4) */
+
+DCCP_SOCKOPT_QPOLICY_TXQLEN sets the maximum length of the output queue. A zero
+value is always interpreted as unbounded queue length. If different from zero,
+the interpretation of this parameter depends on the current dequeuing policy
+(see above): the "simple" policy will enforce a fixed queue size by returning
+EAGAIN, whereas the "prio" policy enforces a fixed queue length by dropping the
+lowest-priority packet first. The default value for this parameter is
+initialised from /proc/sys/net/dccp/default/tx_qlen.
+
 DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
 service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
 the socket will fall back to 0 (which means that no meaningful service code
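
The cmsg interface documented above can be driven from user space along the following lines. This is only an illustrative sketch, not part of the patch: it assumes a <linux/dccp.h> that already exports the qpolicy additions from this series (DCCP_SOCKOPT_QPOLICY_ID, DCCP_SOCKOPT_QPOLICY_TXQLEN, DCCPQ_POLICY_PRIO, DCCP_SCM_PRIORITY); SOL_DCCP is defined locally in case the system headers do not provide it.

    #include <string.h>
    #include <stdint.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <netinet/in.h>
    #include <linux/dccp.h>

    #ifndef SOL_DCCP
    #define SOL_DCCP 269                    /* kernel socket-level value for DCCP */
    #endif

    /* Select the priority-based policy; must happen before connect(). */
    static void choose_prio_policy(int fd)
    {
            int policy = DCCPQ_POLICY_PRIO;
            int qlen   = 16;                /* bounded TX queue, lowest priority dropped */

            setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID, &policy, sizeof(policy));
            setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_TXQLEN, &qlen, sizeof(qlen));
    }

    /* Send one packet tagged with a u32 priority via SOL_DCCP/DCCP_SCM_PRIORITY. */
    static ssize_t send_with_priority(int fd, const void *buf, size_t len, uint32_t prio)
    {
            char cbuf[CMSG_SPACE(sizeof(uint32_t))];
            struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
            };
            struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

            cmsg->cmsg_level = SOL_DCCP;
            cmsg->cmsg_type  = DCCP_SCM_PRIORITY;
            cmsg->cmsg_len   = CMSG_LEN(sizeof(uint32_t));
            memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));

            return sendmsg(fd, &msg, 0);
    }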
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 3c5e465296e..2193a5d124c 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -708,10 +708,28 @@ igmp_max_memberships - INTEGER
 	Change the maximum number of multicast groups we can subscribe to.
 	Default: 20
 
-conf/interface/*  changes special settings per interface (where "interface" is
-		  the name of your network interface)
-conf/all/*	  is special, changes the settings for all interfaces
+	Theoretical maximum value is bounded by having to send a membership
+	report in a single datagram (i.e. the report can't span multiple
+	datagrams, or risk confusing the switch and leaving groups you don't
+	intend to).
 
+	The number of supported groups 'M' is bounded by the number of group
+	report entries you can fit into a single datagram of 65535 bytes.
+
+	M = 65536-sizeof (ip header)/(sizeof(Group record))
+
+	Group records are variable length, with a minimum of 12 bytes.
+	So net.ipv4.igmp_max_memberships should not be set higher than:
+
+	(65536-24) / 12 = 5459
+
+	The value 5459 assumes no IP header options, so in practice
+	this number may be lower.
+
+	conf/interface/*  changes special settings per interface (where
+	"interface" is the name of your network interface)
+
+	conf/all/*	  is special, changes the settings for all interfaces
 
 log_martians - BOOLEAN
 	Log packets with impossible addresses to kernel log.
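
For reference, the arithmetic quoted in the new text can be checked mechanically; the 65536, 24 and 12 byte figures below are simply the ones used by the documentation, not independently derived values.

    #include <stdio.h>

    int main(void)
    {
            const int max_datagram     = 65536;  /* datagram limit used in the text */
            const int header_overhead  = 24;     /* IP header figure used in the text */
            const int min_group_record = 12;     /* minimum IGMPv3 group record size */

            printf("upper bound for igmp_max_memberships: %d\n",
                   (max_datagram - header_overhead) / min_group_record);  /* 5459 */
            return 0;
    }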
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 7ee770b5ef5..80a7a345490 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -7,7 +7,7 @@ This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
 (Synopsys IP blocks); it has been fully tested on STLinux platforms.
 
 Currently this network device driver is for all STM embedded MAC/GMAC
-(7xxx SoCs).
+(7xxx SoCs). Other platforms start using it i.e. ARM SPEAr.
 
 DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
 Universal version 4.0 have been used for developing the first code
@@ -95,9 +95,14 @@ Several information came from the platform; please refer to the
 driver's Header file in include/linux directory.
 
 struct plat_stmmacenet_data {
 	int bus_id;
 	int pbl;
-	int has_gmac;
+	int clk_csr;
+	int has_gmac;
+	int enh_desc;
+	int tx_coe;
+	int bugged_jumbo;
+	int pmt;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(unsigned long ioaddr);
 #ifdef CONFIG_STM_DRIVERS
@@ -114,6 +119,12 @@ Where:
 	registers (on STM platforms);
 - has_gmac: GMAC core is on board (get it at run-time in the next step);
 - bus_id: bus identifier.
+- tx_coe: core is able to perform the tx csum in HW.
+- enh_desc: if sets the MAC will use the enhanced descriptor structure.
+- clk_csr: CSR Clock range selection.
+- bugged_jumbo: some HWs are not able to perform the csum in HW for
+		over-sized frames due to limited buffer sizes. Setting this
+		flag the csum will be done in SW on JUMBO frames.
 
 struct plat_stmmacphy_data {
 	int bus_id;
@@ -131,13 +142,28 @@ Where:
 - interface: physical MII interface mode;
 - phy_reset: hook to reset HW function.
 
+SOURCES:
+- Kconfig
+- Makefile
+- stmmac_main.c: main network device driver;
+- stmmac_mdio.c: mdio functions;
+- stmmac_ethtool.c: ethtool support;
+- stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts
+	Only tested on ST40 platforms based.
+- stmmac.h: private driver structure;
+- common.h: common definitions and VFTs;
+- descs.h: descriptor structure definitions;
+- dwmac1000_core.c: GMAC core functions;
+- dwmac1000_dma.c: dma functions for the GMAC chip;
+- dwmac1000.h: specific header file for the GMAC;
+- dwmac100_core: MAC 100 core and dma code;
+- dwmac100_dma.c: dma funtions for the MAC chip;
+- dwmac1000.h: specific header file for the MAC;
+- dwmac_lib.c: generic DMA functions shared among chips
+- enh_desc.c: functions for handling enhanced descriptors
+- norm_desc.c: functions for handling normal descriptors
+
 TODO:
-- Continue to make the driver more generic and suitable for other Synopsys
-  Ethernet controllers used on other architectures (i.e. ARM).
-- 10G controllers are not supported.
-- MAC uses Normal descriptors and GMAC uses enhanced ones.
-  This is a limit that should be reviewed. MAC could want to
-  use the enhanced structure.
-- Checksumming: Rx/Tx csum is done in HW in case of GMAC only.
+- XGMAC controller is not supported.
 - Review the timer optimisation code to use an embedded device that seems to be
   available in new chip generations.
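
As a rough illustration of how a platform might feed the new plat_stmmacenet_data fields to the driver, the board-code sketch below registers an stmmac platform device. It is hypothetical: the device name "stmmaceth" and the "macirq" resource name are assumed to match what the driver looks up (check against the driver before reuse), and the addresses, IRQ number and flag values are made-up placeholders.

    #include <linux/kernel.h>
    #include <linux/platform_device.h>
    #include <linux/stmmac.h>

    static struct plat_stmmacenet_data my_gmac_data = {
            .bus_id       = 0,
            .pbl          = 32,     /* DMA programmable burst length */
            .clk_csr      = 0,      /* CSR clock range selection */
            .has_gmac     = 1,      /* GMAC core on board ... */
            .enh_desc     = 1,      /* ... so use enhanced descriptors */
            .tx_coe       = 1,      /* TX checksum offload available in HW */
            .bugged_jumbo = 0,
            .pmt          = 1,      /* PMT / Wake-on-LAN block present */
    };

    static struct resource my_gmac_resources[] = {
            { .start = 0xfd110000, .end = 0xfd117fff, .flags = IORESOURCE_MEM },
            { .start = 42, .end = 42, .name = "macirq", .flags = IORESOURCE_IRQ },
    };

    static struct platform_device my_gmac_device = {
            .name          = "stmmaceth",
            .id            = 0,
            .num_resources = ARRAY_SIZE(my_gmac_resources),
            .resource      = my_gmac_resources,
            .dev           = {
                    .platform_data = &my_gmac_data,
            },
    };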
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 89ed1be2d62..8be26150605 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -642,31 +642,13 @@ static void __init omap3pandora_init_irq(void)
 	omap_gpio_init();
 }
 
-static void pandora_wl1251_set_power(bool enable)
-{
-	/*
-	 * Keep power always on until wl1251_sdio driver learns to re-init
-	 * the chip after powering it down and back up.
-	 */
-}
-
-static struct wl12xx_platform_data pandora_wl1251_pdata = {
-	.set_power	= pandora_wl1251_set_power,
-	.use_eeprom	= true,
-};
-
-static struct platform_device pandora_wl1251_data = {
-	.name	= "wl1251_data",
-	.id	= -1,
-	.dev	= {
-		.platform_data	= &pandora_wl1251_pdata,
-	},
-};
-
-static void pandora_wl1251_init(void)
+static void __init pandora_wl1251_init(void)
 {
+	struct wl12xx_platform_data pandora_wl1251_pdata;
 	int ret;
 
+	memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
+
 	ret = gpio_request(PANDORA_WIFI_IRQ_GPIO, "wl1251 irq");
 	if (ret < 0)
 		goto fail;
@@ -679,6 +661,11 @@ static void pandora_wl1251_init(void)
 	if (pandora_wl1251_pdata.irq < 0)
 		goto fail_irq;
 
+	pandora_wl1251_pdata.use_eeprom = true;
+	ret = wl12xx_set_platform_data(&pandora_wl1251_pdata);
+	if (ret < 0)
+		goto fail_irq;
+
 	return;
 
 fail_irq:
@@ -691,7 +678,6 @@ static struct platform_device *omap3pandora_devices[] __initdata = {
 	&pandora_leds_gpio,
 	&pandora_keys_gpio,
 	&pandora_dss_device,
-	&pandora_wl1251_data,
 	&pandora_vwlan_device,
 };
 
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index c8fc69c85a0..c0976195935 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -92,7 +92,7 @@
92 92
93#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ]) 93#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
94 94
95#define FORE200E_NEXT_ENTRY(index, modulo) (index = ++(index) % (modulo)) 95#define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
96 96
97#if 1 97#if 1
98#define ASSERT(expr) if (!(expr)) { \ 98#define ASSERT(expr) if (!(expr)) { \
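The FORE200E_NEXT_ENTRY hunk above replaces "index = ++(index) % (modulo)", which modifies index twice between sequence points and is therefore undefined behaviour in C, with a form that reads the old value once and assigns once. A minimal stand-alone sketch of the corrected idiom; the RING_NEXT_ENTRY name and the surrounding test program are hypothetical, not part of the driver:

#include <assert.h>

/*
 * Corrected ring-advance idiom from the hunk above: compute the next
 * index once and assign it once, instead of index = ++(index) % modulo,
 * which modifies 'index' twice without an intervening sequence point.
 */
#define RING_NEXT_ENTRY(index, modulo)	((index) = ((index) + 1) % (modulo))

int main(void)
{
	int idx = 0;
	const int ring_size = 4;
	int i;

	for (i = 0; i < 2 * ring_size; i++)	/* walk the ring twice */
		RING_NEXT_ENTRY(idx, ring_size);

	assert(idx == 0);	/* eight advances around four slots wrap back to 0 */
	return 0;
}
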
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index cbe15a86c66..930051d941a 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2241,11 +2241,8 @@ static int __devinit lanai_dev_open(struct atm_dev *atmdev)
2241 memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN); 2241 memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
2242 lanai_timed_poll_start(lanai); 2242 lanai_timed_poll_start(lanai);
2243 printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u " 2243 printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
2244 "(%02X-%02X-%02X-%02X-%02X-%02X)\n", lanai->number, 2244 "(%pMF)\n", lanai->number, (int) lanai->pci->revision,
2245 (int) lanai->pci->revision, (unsigned long) lanai->base, 2245 (unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
2246 lanai->pci->irq,
2247 atmdev->esi[0], atmdev->esi[1], atmdev->esi[2],
2248 atmdev->esi[3], atmdev->esi[4], atmdev->esi[5]);
2249 printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), " 2246 printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
2250 "board_rev=%d\n", lanai->number, 2247 "board_rev=%d\n", lanai->number,
2251 lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno, 2248 lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 5674bd01d96..de0435e63b0 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -297,8 +297,8 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
297 struct sk_buff *skb; 297 struct sk_buff *skb;
298 struct net_device *ifp; 298 struct net_device *ifp;
299 299
300 read_lock(&dev_base_lock); 300 rcu_read_lock();
301 for_each_netdev(&init_net, ifp) { 301 for_each_netdev_rcu(&init_net, ifp) {
302 dev_hold(ifp); 302 dev_hold(ifp);
303 if (!is_aoe_netif(ifp)) 303 if (!is_aoe_netif(ifp))
304 goto cont; 304 goto cont;
@@ -325,7 +325,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
325cont: 325cont:
326 dev_put(ifp); 326 dev_put(ifp);
327 } 327 }
328 read_unlock(&dev_base_lock); 328 rcu_read_unlock();
329} 329}
330 330
331static void 331static void
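The aoecmd.c change above (and the addr.c and mlx4 hunks that follow) swaps the dev_base_lock reader lock for an RCU read-side section around the net-device walk. A minimal sketch of that pattern, assuming kernel-module context; the count_running_netdevs() helper is hypothetical and only mirrors the locking shown in the hunk:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>

/* Hypothetical helper: count running interfaces in init_net while
 * holding only the RCU read lock, as in the hunk above. */
static int count_running_netdevs(void)
{
	struct net_device *dev;
	int count = 0;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (netif_running(dev))
			count++;
	}
	rcu_read_unlock();

	return count;
}
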
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a5ea1bce968..8aba0ba57de 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -130,8 +130,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
130 130
131#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 131#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
132 case AF_INET6: 132 case AF_INET6:
133 read_lock(&dev_base_lock); 133 rcu_read_lock();
134 for_each_netdev(&init_net, dev) { 134 for_each_netdev_rcu(&init_net, dev) {
135 if (ipv6_chk_addr(&init_net, 135 if (ipv6_chk_addr(&init_net,
136 &((struct sockaddr_in6 *) addr)->sin6_addr, 136 &((struct sockaddr_in6 *) addr)->sin6_addr,
137 dev, 1)) { 137 dev, 1)) {
@@ -139,7 +139,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
139 break; 139 break;
140 } 140 }
141 } 141 }
142 read_unlock(&dev_base_lock); 142 rcu_read_unlock();
143 break; 143 break;
144#endif 144#endif
145 } 145 }
@@ -200,7 +200,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
200 src_in->sin_family = AF_INET; 200 src_in->sin_family = AF_INET;
201 src_in->sin_addr.s_addr = rt->rt_src; 201 src_in->sin_addr.s_addr = rt->rt_src;
202 202
203 if (rt->idev->dev->flags & IFF_LOOPBACK) { 203 if (rt->dst.dev->flags & IFF_LOOPBACK) {
204 ret = rdma_translate_ip((struct sockaddr *) dst_in, addr); 204 ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
205 if (!ret) 205 if (!ret)
206 memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN); 206 memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
@@ -208,12 +208,12 @@ static int addr4_resolve(struct sockaddr_in *src_in,
208 } 208 }
209 209
210 /* If the device does ARP internally, return 'done' */ 210 /* If the device does ARP internally, return 'done' */
211 if (rt->idev->dev->flags & IFF_NOARP) { 211 if (rt->dst.dev->flags & IFF_NOARP) {
212 rdma_copy_addr(addr, rt->idev->dev, NULL); 212 rdma_copy_addr(addr, rt->dst.dev, NULL);
213 goto put; 213 goto put;
214 } 214 }
215 215
216 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev); 216 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
217 if (!neigh || !(neigh->nud_state & NUD_VALID)) { 217 if (!neigh || !(neigh->nud_state & NUD_VALID)) {
218 neigh_event_send(rt->dst.neighbour, NULL); 218 neigh_event_send(rt->dst.neighbour, NULL);
219 ret = -ENODATA; 219 ret = -ENODATA;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index bf3e20cd029..4e55a28fb6d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -848,8 +848,8 @@ static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
848 goto out; 848 goto out;
849 } 849 }
850 850
851 read_lock(&dev_base_lock); 851 rcu_read_lock();
852 for_each_netdev(&init_net, tmp) { 852 for_each_netdev_rcu(&init_net, tmp) {
853 if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) { 853 if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
854 gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); 854 gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
855 vid = rdma_vlan_dev_vlan_id(tmp); 855 vid = rdma_vlan_dev_vlan_id(tmp);
@@ -884,7 +884,7 @@ static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
884 } 884 }
885 } 885 }
886 } 886 }
887 read_unlock(&dev_base_lock); 887 rcu_read_unlock();
888 888
889 for (i = 0; i < 128; ++i) 889 for (i = 0; i < 128; ++i)
890 if (!hits[i]) { 890 if (!hits[i]) {
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index e90db8870b6..bc0529ac88a 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -420,7 +420,7 @@ enable_hwirq(struct inf_hw *hw)
420 break; 420 break;
421 case INF_NICCY: 421 case INF_NICCY:
422 val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); 422 val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
423 val |= NICCY_IRQ_ENABLE;; 423 val |= NICCY_IRQ_ENABLE;
424 outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); 424 outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
425 break; 425 break;
426 case INF_SCT_1: 426 case INF_SCT_1:
@@ -924,7 +924,7 @@ setup_instance(struct inf_hw *card)
924 mISDNipac_init(&card->ipac, card); 924 mISDNipac_init(&card->ipac, card);
925 925
926 if (card->ipac.isac.dch.dev.Bprotocols == 0) 926 if (card->ipac.isac.dch.dev.Bprotocols == 0)
927 goto error_setup;; 927 goto error_setup;
928 928
929 err = mISDN_register_device(&card->ipac.isac.dch.dev, 929 err = mISDN_register_device(&card->ipac.isac.dch.dev,
930 &card->pdev->dev, card->name); 930 &card->pdev->dev, card->name);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 38eb31439a7..d13fa5b119f 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -264,7 +264,7 @@ load_firmware(struct isar_hw *isar, const u8 *buf, int size)
264 while (noc) { 264 while (noc) {
265 val = le16_to_cpu(*sp++); 265 val = le16_to_cpu(*sp++);
266 *mp++ = val >> 8; 266 *mp++ = val >> 8;
267 *mp++ = val & 0xFF;; 267 *mp++ = val & 0xFF;
268 noc--; 268 noc--;
269 } 269 }
270 spin_lock_irqsave(isar->hwlock, flags); 270 spin_lock_irqsave(isar->hwlock, flags);
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index fcf4ed1cb4b..0e66af1decd 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -314,7 +314,7 @@ hdlc_fill_fifo(struct BCState *bcs)
314 bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_XME; 314 bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_XME;
315 } 315 }
316 if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) 316 if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
317 debugl1(cs, "hdlc_fill_fifo %d/%ld", count, bcs->tx_skb->len); 317 debugl1(cs, "hdlc_fill_fifo %d/%u", count, bcs->tx_skb->len);
318 p = bcs->tx_skb->data; 318 p = bcs->tx_skb->data;
319 ptr = (u_int *)p; 319 ptr = (u_int *)p;
320 skb_pull(bcs->tx_skb, count); 320 skb_pull(bcs->tx_skb, count);
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
index f150330b5a2..37e685eafd2 100644
--- a/drivers/isdn/hisax/callc.c
+++ b/drivers/isdn/hisax/callc.c
@@ -65,7 +65,7 @@ hisax_findcard(int driverid)
65 return (struct IsdnCardState *) 0; 65 return (struct IsdnCardState *) 0;
66} 66}
67 67
68static void 68static __attribute__((format(printf, 3, 4))) void
69link_debug(struct Channel *chanp, int direction, char *fmt, ...) 69link_debug(struct Channel *chanp, int direction, char *fmt, ...)
70{ 70{
71 va_list args; 71 va_list args;
@@ -1068,7 +1068,7 @@ init_d_st(struct Channel *chanp)
1068 return 0; 1068 return 0;
1069} 1069}
1070 1070
1071static void 1071static __attribute__((format(printf, 2, 3))) void
1072callc_debug(struct FsmInst *fi, char *fmt, ...) 1072callc_debug(struct FsmInst *fi, char *fmt, ...)
1073{ 1073{
1074 va_list args; 1074 va_list args;
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index b133378d4dc..c110f8679ba 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1917,7 +1917,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
1917#ifdef CONFIG_PCI 1917#ifdef CONFIG_PCI
1918#include <linux/pci.h> 1918#include <linux/pci.h>
1919 1919
1920static struct pci_device_id hisax_pci_tbl[] __devinitdata = { 1920static struct pci_device_id hisax_pci_tbl[] __devinitdata __used = {
1921#ifdef CONFIG_HISAX_FRITZPCI 1921#ifdef CONFIG_HISAX_FRITZPCI
1922 {PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) }, 1922 {PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) },
1923#endif 1923#endif
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
index 7250f56a524..a16459a1332 100644
--- a/drivers/isdn/hisax/hfc_2bds0.c
+++ b/drivers/isdn/hisax/hfc_2bds0.c
@@ -292,7 +292,7 @@ hfc_fill_fifo(struct BCState *bcs)
292 } 292 }
293 count = GetFreeFifoBytes_B(bcs); 293 count = GetFreeFifoBytes_B(bcs);
294 if (cs->debug & L1_DEB_HSCX) 294 if (cs->debug & L1_DEB_HSCX)
295 debugl1(cs, "hfc_fill_fifo %d count(%ld/%d),%lx", 295 debugl1(cs, "hfc_fill_fifo %d count(%u/%d),%lx",
296 bcs->channel, bcs->tx_skb->len, 296 bcs->channel, bcs->tx_skb->len,
297 count, current->state); 297 count, current->state);
298 if (count < bcs->tx_skb->len) { 298 if (count < bcs->tx_skb->len) {
@@ -719,7 +719,7 @@ hfc_fill_dfifo(struct IsdnCardState *cs)
719 } 719 }
720 count = GetFreeFifoBytes_D(cs); 720 count = GetFreeFifoBytes_D(cs);
721 if (cs->debug & L1_DEB_ISAC) 721 if (cs->debug & L1_DEB_ISAC)
722 debugl1(cs, "hfc_fill_Dfifo count(%ld/%d)", 722 debugl1(cs, "hfc_fill_Dfifo count(%u/%d)",
723 cs->tx_skb->len, count); 723 cs->tx_skb->len, count);
724 if (count < cs->tx_skb->len) { 724 if (count < cs->tx_skb->len) {
725 if (cs->debug & L1_DEB_ISAC) 725 if (cs->debug & L1_DEB_ISAC)
diff --git a/drivers/isdn/hisax/hfc_2bs0.c b/drivers/isdn/hisax/hfc_2bs0.c
index b1f6481e119..626f85df302 100644
--- a/drivers/isdn/hisax/hfc_2bs0.c
+++ b/drivers/isdn/hisax/hfc_2bs0.c
@@ -282,7 +282,7 @@ hfc_fill_fifo(struct BCState *bcs)
282 count += cs->hw.hfc.fifosize; 282 count += cs->hw.hfc.fifosize;
283 } /* L1_MODE_TRANS */ 283 } /* L1_MODE_TRANS */
284 if (cs->debug & L1_DEB_HSCX) 284 if (cs->debug & L1_DEB_HSCX)
285 debugl1(cs, "hfc_fill_fifo %d count(%ld/%d)", 285 debugl1(cs, "hfc_fill_fifo %d count(%u/%d)",
286 bcs->channel, bcs->tx_skb->len, 286 bcs->channel, bcs->tx_skb->len,
287 count); 287 count);
288 if (count < bcs->tx_skb->len) { 288 if (count < bcs->tx_skb->len) {
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 917cc84065b..3147020d188 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -550,7 +550,7 @@ hfcpci_fill_dfifo(struct IsdnCardState *cs)
550 count += D_FIFO_SIZE; /* count now contains available bytes */ 550 count += D_FIFO_SIZE; /* count now contains available bytes */
551 551
552 if (cs->debug & L1_DEB_ISAC) 552 if (cs->debug & L1_DEB_ISAC)
553 debugl1(cs, "hfcpci_fill_Dfifo count(%ld/%d)", 553 debugl1(cs, "hfcpci_fill_Dfifo count(%u/%d)",
554 cs->tx_skb->len, count); 554 cs->tx_skb->len, count);
555 if (count < cs->tx_skb->len) { 555 if (count < cs->tx_skb->len) {
556 if (cs->debug & L1_DEB_ISAC) 556 if (cs->debug & L1_DEB_ISAC)
@@ -681,7 +681,7 @@ hfcpci_fill_fifo(struct BCState *bcs)
681 count += B_FIFO_SIZE; /* count now contains available bytes */ 681 count += B_FIFO_SIZE; /* count now contains available bytes */
682 682
683 if (cs->debug & L1_DEB_HSCX) 683 if (cs->debug & L1_DEB_HSCX)
684 debugl1(cs, "hfcpci_fill_fifo %d count(%ld/%d),%lx", 684 debugl1(cs, "hfcpci_fill_fifo %d count(%u/%d),%lx",
685 bcs->channel, bcs->tx_skb->len, 685 bcs->channel, bcs->tx_skb->len,
686 count, current->state); 686 count, current->state);
687 687
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 5aa138eb0b3..1235b7131ae 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -179,7 +179,7 @@ write_fifo(struct IsdnCardState *cs, struct sk_buff *skb, u_char fifo, int trans
179 count += fifo_size; /* count now contains available bytes */ 179 count += fifo_size; /* count now contains available bytes */
180 180
181 if (cs->debug & L1_DEB_ISAC_FIFO) 181 if (cs->debug & L1_DEB_ISAC_FIFO)
182 debugl1(cs, "hfcsx_write_fifo %d count(%ld/%d)", 182 debugl1(cs, "hfcsx_write_fifo %d count(%u/%d)",
183 fifo, skb->len, count); 183 fifo, skb->len, count);
184 if (count < skb->len) { 184 if (count < skb->len) {
185 if (cs->debug & L1_DEB_ISAC_FIFO) 185 if (cs->debug & L1_DEB_ISAC_FIFO)
@@ -265,7 +265,7 @@ read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max)
265 count++; 265 count++;
266 266
267 if (cs->debug & L1_DEB_ISAC_FIFO) 267 if (cs->debug & L1_DEB_ISAC_FIFO)
268 debugl1(cs, "hfcsx_read_fifo %d count %ld)", 268 debugl1(cs, "hfcsx_read_fifo %d count %u)",
269 fifo, count); 269 fifo, count);
270 270
271 if ((count > fifo_size) || (count < 4)) { 271 if ((count > fifo_size) || (count < 4)) {
@@ -986,7 +986,7 @@ HFCSX_l1hw(struct PStack *st, int pr, void *arg)
986 default: 986 default:
987 spin_unlock_irqrestore(&cs->lock, flags); 987 spin_unlock_irqrestore(&cs->lock, flags);
988 if (cs->debug & L1_DEB_WARN) 988 if (cs->debug & L1_DEB_WARN)
989 debugl1(cs, "hfcsx_l1hw loop invalid %4lx", arg); 989 debugl1(cs, "hfcsx_l1hw loop invalid %4lx", (unsigned long)arg);
990 return; 990 return;
991 } 991 }
992 cs->hw.hfcsx.trm |= 0x80; /* enable IOM-loop */ 992 cs->hw.hfcsx.trm |= 0x80; /* enable IOM-loop */
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 32ab3924aa7..de1c669c7b1 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -1286,7 +1286,9 @@ int jiftime(char *s, long mark);
1286 1286
1287int HiSax_command(isdn_ctrl * ic); 1287int HiSax_command(isdn_ctrl * ic);
1288int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb); 1288int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb);
1289__attribute__((format(printf, 3, 4)))
1289void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...); 1290void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...);
1291__attribute__((format(printf, 3, 0)))
1290void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args); 1292void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args);
1291void HiSax_reportcard(int cardnr, int sel); 1293void HiSax_reportcard(int cardnr, int sel);
1292int QuickHex(char *txt, u_char * p, int cnt); 1294int QuickHex(char *txt, u_char * p, int cnt);
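The hisax.h hunk above annotates HiSax_putstatus() and its va_list twin with the two flavours of the printf format attribute: an index pair such as (3, 4) tells the compiler which argument is the format string and where the checked variadic arguments start, while a last index of 0 marks a function whose values arrive as a va_list, so there is nothing positional to check. A small stand-alone sketch with hypothetical function names (indices shifted to 1-based because these stubs lack the extra cs/head arguments):

#include <stdarg.h>
#include <stdio.h>

/* va_list variant: format is argument 1, nothing positional to check. */
__attribute__((format(printf, 1, 0)))
static void vlog_status(const char *fmt, va_list args)
{
	vfprintf(stderr, fmt, args);
}

/* Variadic variant: format is argument 1, checking starts at argument 2. */
__attribute__((format(printf, 1, 2)))
static void log_status(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vlog_status(fmt, args);
	va_end(args);
}

int main(void)
{
	log_status("channel %d ready\n", 2);
	/* log_status("channel %d ready\n", "two");  <- now a compile-time warning */
	return 0;
}
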
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index 751b25f2ff5..332104103e1 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -717,7 +717,7 @@ bch_mode(struct BCState *bcs, int mode, int bc)
717 717
718 bc = bc ? 1 : 0; // in case bc is greater than 1 718 bc = bc ? 1 : 0; // in case bc is greater than 1
719 if (cs->debug & L1_DEB_HSCX) 719 if (cs->debug & L1_DEB_HSCX)
720 debugl1(cs, "mode_bch() switch B-% mode %d chan %d", hscx, mode, bc); 720 debugl1(cs, "mode_bch() switch B-%d mode %d chan %d", hscx, mode, bc);
721 bcs->mode = mode; 721 bcs->mode = mode;
722 bcs->channel = bc; 722 bcs->channel = bc;
723 723
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 2e72227bd07..1be4552d94b 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -953,7 +953,7 @@ isar_pump_statev_modem(struct BCState *bcs, u_char devt) {
953 break; 953 break;
954 case PSEV_GSTN_CLR: 954 case PSEV_GSTN_CLR:
955 if (cs->debug & L1_DEB_HSCX) 955 if (cs->debug & L1_DEB_HSCX)
956 debugl1(cs, "pump stev GSTN CLEAR", devt); 956 debugl1(cs, "pump stev GSTN CLEAR");
957 break; 957 break;
958 default: 958 default:
959 if (cs->debug & L1_DEB_HSCX) 959 if (cs->debug & L1_DEB_HSCX)
@@ -1268,7 +1268,7 @@ isar_int_main(struct IsdnCardState *cs)
1268static void 1268static void
1269ftimer_handler(struct BCState *bcs) { 1269ftimer_handler(struct BCState *bcs) {
1270 if (bcs->cs->debug) 1270 if (bcs->cs->debug)
1271 debugl1(bcs->cs, "ftimer flags %04x", 1271 debugl1(bcs->cs, "ftimer flags %04lx",
1272 bcs->Flag); 1272 bcs->Flag);
1273 test_and_clear_bit(BC_FLG_FTI_RUN, &bcs->Flag); 1273 test_and_clear_bit(BC_FLG_FTI_RUN, &bcs->Flag);
1274 if (test_and_clear_bit(BC_FLG_LL_CONN, &bcs->Flag)) { 1274 if (test_and_clear_bit(BC_FLG_LL_CONN, &bcs->Flag)) {
@@ -1748,7 +1748,7 @@ isar_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) {
1748 struct BCState *bcs; 1748 struct BCState *bcs;
1749 1749
1750 if (cs->debug & L1_DEB_HSCX) 1750 if (cs->debug & L1_DEB_HSCX)
1751 debugl1(cs, "isar_auxcmd cmd/ch %x/%d", ic->command, ic->arg); 1751 debugl1(cs, "isar_auxcmd cmd/ch %x/%ld", ic->command, ic->arg);
1752 switch (ic->command) { 1752 switch (ic->command) {
1753 case (ISDN_CMD_FAXCMD): 1753 case (ISDN_CMD_FAXCMD):
1754 bcs = cs->channel[ic->arg].bcs; 1754 bcs = cs->channel[ic->arg].bcs;
diff --git a/drivers/isdn/hisax/isdnl1.h b/drivers/isdn/hisax/isdnl1.h
index 172ad4c8c96..425d86116f2 100644
--- a/drivers/isdn/hisax/isdnl1.h
+++ b/drivers/isdn/hisax/isdnl1.h
@@ -21,6 +21,7 @@
21#define B_XMTBUFREADY 1 21#define B_XMTBUFREADY 1
22#define B_ACKPENDING 2 22#define B_ACKPENDING 2
23 23
24__attribute__((format(printf, 2, 3)))
24void debugl1(struct IsdnCardState *cs, char *fmt, ...); 25void debugl1(struct IsdnCardState *cs, char *fmt, ...);
25void DChannel_proc_xmt(struct IsdnCardState *cs); 26void DChannel_proc_xmt(struct IsdnCardState *cs);
26void DChannel_proc_rcv(struct IsdnCardState *cs); 27void DChannel_proc_rcv(struct IsdnCardState *cs);
diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c
index fd0b643ab74..ad291f21b20 100644
--- a/drivers/isdn/hisax/isdnl3.c
+++ b/drivers/isdn/hisax/isdnl3.c
@@ -66,7 +66,7 @@ static char *strL3Event[] =
66 "EV_TIMEOUT", 66 "EV_TIMEOUT",
67}; 67};
68 68
69static void 69static __attribute__((format(printf, 2, 3))) void
70l3m_debug(struct FsmInst *fi, char *fmt, ...) 70l3m_debug(struct FsmInst *fi, char *fmt, ...)
71{ 71{
72 va_list args; 72 va_list args;
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
index 5d7f0f2ff9b..644891efc26 100644
--- a/drivers/isdn/hisax/netjet.c
+++ b/drivers/isdn/hisax/netjet.c
@@ -254,7 +254,7 @@ static int make_raw_data(struct BCState *bcs) {
254 val >>= 1; 254 val >>= 1;
255 } 255 }
256 if (bcs->cs->debug & L1_DEB_HSCX) 256 if (bcs->cs->debug & L1_DEB_HSCX)
257 debugl1(bcs->cs,"tiger make_raw: in %ld out %d.%d", 257 debugl1(bcs->cs,"tiger make_raw: in %u out %d.%d",
258 bcs->tx_skb->len, s_cnt, bitcnt); 258 bcs->tx_skb->len, s_cnt, bitcnt);
259 if (bitcnt) { 259 if (bitcnt) {
260 while (8>bitcnt++) { 260 while (8>bitcnt++) {
@@ -361,7 +361,7 @@ static int make_raw_data_56k(struct BCState *bcs) {
361 val >>= 1; 361 val >>= 1;
362 } 362 }
363 if (bcs->cs->debug & L1_DEB_HSCX) 363 if (bcs->cs->debug & L1_DEB_HSCX)
364 debugl1(bcs->cs,"tiger make_raw_56k: in %ld out %d.%d", 364 debugl1(bcs->cs,"tiger make_raw_56k: in %u out %d.%d",
365 bcs->tx_skb->len, s_cnt, bitcnt); 365 bcs->tx_skb->len, s_cnt, bitcnt);
366 if (bitcnt) { 366 if (bitcnt) {
367 while (8>bitcnt++) { 367 while (8>bitcnt++) {
@@ -612,7 +612,7 @@ void netjet_fill_dma(struct BCState *bcs)
612 if (!bcs->tx_skb) 612 if (!bcs->tx_skb)
613 return; 613 return;
614 if (bcs->cs->debug & L1_DEB_HSCX) 614 if (bcs->cs->debug & L1_DEB_HSCX)
615 debugl1(bcs->cs,"tiger fill_dma1: c%d %4x", bcs->channel, 615 debugl1(bcs->cs,"tiger fill_dma1: c%d %4lx", bcs->channel,
616 bcs->Flag); 616 bcs->Flag);
617 if (test_and_set_bit(BC_FLG_BUSY, &bcs->Flag)) 617 if (test_and_set_bit(BC_FLG_BUSY, &bcs->Flag))
618 return; 618 return;
@@ -625,7 +625,7 @@ void netjet_fill_dma(struct BCState *bcs)
625 return; 625 return;
626 }; 626 };
627 if (bcs->cs->debug & L1_DEB_HSCX) 627 if (bcs->cs->debug & L1_DEB_HSCX)
628 debugl1(bcs->cs,"tiger fill_dma2: c%d %4x", bcs->channel, 628 debugl1(bcs->cs,"tiger fill_dma2: c%d %4lx", bcs->channel,
629 bcs->Flag); 629 bcs->Flag);
630 if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) { 630 if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) {
631 write_raw(bcs, bcs->hw.tiger.sendp, bcs->hw.tiger.free); 631 write_raw(bcs, bcs->hw.tiger.sendp, bcs->hw.tiger.free);
@@ -667,7 +667,7 @@ void netjet_fill_dma(struct BCState *bcs)
667 write_raw(bcs, p, cnt); 667 write_raw(bcs, p, cnt);
668 } 668 }
669 if (bcs->cs->debug & L1_DEB_HSCX) 669 if (bcs->cs->debug & L1_DEB_HSCX)
670 debugl1(bcs->cs,"tiger fill_dma3: c%d %4x", bcs->channel, 670 debugl1(bcs->cs,"tiger fill_dma3: c%d %4lx", bcs->channel,
671 bcs->Flag); 671 bcs->Flag);
672} 672}
673 673
diff --git a/drivers/isdn/hisax/st5481_d.c b/drivers/isdn/hisax/st5481_d.c
index b7876b19fe7..44082637a09 100644
--- a/drivers/isdn/hisax/st5481_d.c
+++ b/drivers/isdn/hisax/st5481_d.c
@@ -167,7 +167,8 @@ static struct FsmNode L1FnList[] __initdata =
167 {ST_L1_F8, EV_IND_RSY, l1_ignore}, 167 {ST_L1_F8, EV_IND_RSY, l1_ignore},
168}; 168};
169 169
170static void l1m_debug(struct FsmInst *fi, char *fmt, ...) 170static __attribute__((format(printf, 2, 3)))
171void l1m_debug(struct FsmInst *fi, char *fmt, ...)
171{ 172{
172 va_list args; 173 va_list args;
173 char buf[256]; 174 char buf[256];
@@ -269,7 +270,8 @@ static char *strDoutEvent[] =
269 "EV_DOUT_UNDERRUN", 270 "EV_DOUT_UNDERRUN",
270}; 271};
271 272
272static void dout_debug(struct FsmInst *fi, char *fmt, ...) 273static __attribute__((format(printf, 2, 3)))
274void dout_debug(struct FsmInst *fi, char *fmt, ...)
273{ 275{
274 va_list args; 276 va_list args;
275 char buf[256]; 277 char buf[256];
diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
index 46048e55f24..d568689669f 100644
--- a/drivers/isdn/i4l/isdn_concap.c
+++ b/drivers/isdn/i4l/isdn_concap.c
@@ -61,7 +61,7 @@ static int isdn_concap_dl_data_req(struct concap_proto *concap, struct sk_buff *
61static int isdn_concap_dl_connect_req(struct concap_proto *concap) 61static int isdn_concap_dl_connect_req(struct concap_proto *concap)
62{ 62{
63 struct net_device *ndev = concap -> net_dev; 63 struct net_device *ndev = concap -> net_dev;
64 isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev); 64 isdn_net_local *lp = netdev_priv(ndev);
65 int ret; 65 int ret;
66 IX25DEBUG( "isdn_concap_dl_connect_req: %s \n", ndev -> name); 66 IX25DEBUG( "isdn_concap_dl_connect_req: %s \n", ndev -> name);
67 67
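The cast dropped in the hunk above is redundant because netdev_priv() returns void *, which C converts to any object pointer type implicitly. A stand-alone sketch of the same point with stub types; everything here is hypothetical, only the void * behaviour is the real API property:

#include <stddef.h>

struct net_device_stub { long priv[8]; };
struct isdn_net_local_stub { int dialstate; };

/* Stand-in for netdev_priv(): returns void *, so no cast is needed
 * when assigning to a typed pointer. */
static void *stub_netdev_priv(struct net_device_stub *dev)
{
	return dev->priv;
}

int main(void)
{
	struct net_device_stub dev = { { 0 } };
	struct isdn_net_local_stub *lp = stub_netdev_priv(&dev);	/* no cast */

	return lp->dialstate;
}
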
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 26d44c3ca1d..afeede7ee29 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -827,7 +827,7 @@ isdn_net_dial(void)
827void 827void
828isdn_net_hangup(struct net_device *d) 828isdn_net_hangup(struct net_device *d)
829{ 829{
830 isdn_net_local *lp = (isdn_net_local *) netdev_priv(d); 830 isdn_net_local *lp = netdev_priv(d);
831 isdn_ctrl cmd; 831 isdn_ctrl cmd;
832#ifdef CONFIG_ISDN_X25 832#ifdef CONFIG_ISDN_X25
833 struct concap_proto *cprot = lp->netdev->cprot; 833 struct concap_proto *cprot = lp->netdev->cprot;
@@ -1052,7 +1052,7 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
1052{ 1052{
1053 isdn_net_dev *nd; 1053 isdn_net_dev *nd;
1054 isdn_net_local *slp; 1054 isdn_net_local *slp;
1055 isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev); 1055 isdn_net_local *lp = netdev_priv(ndev);
1056 int retv = NETDEV_TX_OK; 1056 int retv = NETDEV_TX_OK;
1057 1057
1058 if (((isdn_net_local *) netdev_priv(ndev))->master) { 1058 if (((isdn_net_local *) netdev_priv(ndev))->master) {
@@ -1116,7 +1116,7 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
1116static void 1116static void
1117isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev) 1117isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
1118{ 1118{
1119 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 1119 isdn_net_local *lp = netdev_priv(dev);
1120 if (!skb) 1120 if (!skb)
1121 return; 1121 return;
1122 if (lp->p_encap == ISDN_NET_ENCAP_ETHER) { 1122 if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
@@ -1131,7 +1131,7 @@ isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
1131 1131
1132static void isdn_net_tx_timeout(struct net_device * ndev) 1132static void isdn_net_tx_timeout(struct net_device * ndev)
1133{ 1133{
1134 isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev); 1134 isdn_net_local *lp = netdev_priv(ndev);
1135 1135
1136 printk(KERN_WARNING "isdn_tx_timeout dev %s dialstate %d\n", ndev->name, lp->dialstate); 1136 printk(KERN_WARNING "isdn_tx_timeout dev %s dialstate %d\n", ndev->name, lp->dialstate);
1137 if (!lp->dialstate){ 1137 if (!lp->dialstate){
@@ -1165,7 +1165,7 @@ static void isdn_net_tx_timeout(struct net_device * ndev)
1165static netdev_tx_t 1165static netdev_tx_t
1166isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1166isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1167{ 1167{
1168 isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev); 1168 isdn_net_local *lp = netdev_priv(ndev);
1169#ifdef CONFIG_ISDN_X25 1169#ifdef CONFIG_ISDN_X25
1170 struct concap_proto * cprot = lp -> netdev -> cprot; 1170 struct concap_proto * cprot = lp -> netdev -> cprot;
1171/* At this point hard_start_xmit() passes control to the encapsulation 1171/* At this point hard_start_xmit() passes control to the encapsulation
@@ -1347,7 +1347,7 @@ isdn_net_close(struct net_device *dev)
1347static struct net_device_stats * 1347static struct net_device_stats *
1348isdn_net_get_stats(struct net_device *dev) 1348isdn_net_get_stats(struct net_device *dev)
1349{ 1349{
1350 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 1350 isdn_net_local *lp = netdev_priv(dev);
1351 return &lp->stats; 1351 return &lp->stats;
1352} 1352}
1353 1353
@@ -1426,7 +1426,7 @@ isdn_net_ciscohdlck_alloc_skb(isdn_net_local *lp, int len)
1426static int 1426static int
1427isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1427isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1428{ 1428{
1429 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 1429 isdn_net_local *lp = netdev_priv(dev);
1430 unsigned long len = 0; 1430 unsigned long len = 0;
1431 unsigned long expires = 0; 1431 unsigned long expires = 0;
1432 int tmp = 0; 1432 int tmp = 0;
@@ -1493,7 +1493,7 @@ isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1493static int isdn_net_ioctl(struct net_device *dev, 1493static int isdn_net_ioctl(struct net_device *dev,
1494 struct ifreq *ifr, int cmd) 1494 struct ifreq *ifr, int cmd)
1495{ 1495{
1496 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 1496 isdn_net_local *lp = netdev_priv(dev);
1497 1497
1498 switch (lp->p_encap) { 1498 switch (lp->p_encap) {
1499#ifdef CONFIG_ISDN_PPP 1499#ifdef CONFIG_ISDN_PPP
@@ -1786,7 +1786,7 @@ isdn_net_ciscohdlck_receive(isdn_net_local *lp, struct sk_buff *skb)
1786static void 1786static void
1787isdn_net_receive(struct net_device *ndev, struct sk_buff *skb) 1787isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
1788{ 1788{
1789 isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev); 1789 isdn_net_local *lp = netdev_priv(ndev);
1790 isdn_net_local *olp = lp; /* original 'lp' */ 1790 isdn_net_local *olp = lp; /* original 'lp' */
1791#ifdef CONFIG_ISDN_X25 1791#ifdef CONFIG_ISDN_X25
1792 struct concap_proto *cprot = lp -> netdev -> cprot; 1792 struct concap_proto *cprot = lp -> netdev -> cprot;
@@ -1800,7 +1800,7 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
1800 * handle master's statistics and hangup-timeout 1800 * handle master's statistics and hangup-timeout
1801 */ 1801 */
1802 ndev = lp->master; 1802 ndev = lp->master;
1803 lp = (isdn_net_local *) netdev_priv(ndev); 1803 lp = netdev_priv(ndev);
1804 lp->stats.rx_packets++; 1804 lp->stats.rx_packets++;
1805 lp->stats.rx_bytes += skb->len; 1805 lp->stats.rx_bytes += skb->len;
1806 } 1806 }
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index fe824e0cbb2..9e8162c80bb 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1147,15 +1147,14 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
1147 } 1147 }
1148 1148
1149 if (is->pass_filter 1149 if (is->pass_filter
1150 && sk_run_filter(skb, is->pass_filter, is->pass_len) == 0) { 1150 && sk_run_filter(skb, is->pass_filter) == 0) {
1151 if (is->debug & 0x2) 1151 if (is->debug & 0x2)
1152 printk(KERN_DEBUG "IPPP: inbound frame filtered.\n"); 1152 printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
1153 kfree_skb(skb); 1153 kfree_skb(skb);
1154 return; 1154 return;
1155 } 1155 }
1156 if (!(is->active_filter 1156 if (!(is->active_filter
1157 && sk_run_filter(skb, is->active_filter, 1157 && sk_run_filter(skb, is->active_filter) == 0)) {
1158 is->active_len) == 0)) {
1159 if (is->debug & 0x2) 1158 if (is->debug & 0x2)
1160 printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n"); 1159 printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n");
1161 lp->huptimer = 0; 1160 lp->huptimer = 0;
@@ -1221,7 +1220,7 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
1221 struct ippp_struct *ipt,*ipts; 1220 struct ippp_struct *ipt,*ipts;
1222 int slot, retval = NETDEV_TX_OK; 1221 int slot, retval = NETDEV_TX_OK;
1223 1222
1224 mlp = (isdn_net_local *) netdev_priv(netdev); 1223 mlp = netdev_priv(netdev);
1225 nd = mlp->netdev; /* get master lp */ 1224 nd = mlp->netdev; /* get master lp */
1226 1225
1227 slot = mlp->ppp_slot; 1226 slot = mlp->ppp_slot;
@@ -1294,15 +1293,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
1294 } 1293 }
1295 1294
1296 if (ipt->pass_filter 1295 if (ipt->pass_filter
1297 && sk_run_filter(skb, ipt->pass_filter, ipt->pass_len) == 0) { 1296 && sk_run_filter(skb, ipt->pass_filter) == 0) {
1298 if (ipt->debug & 0x4) 1297 if (ipt->debug & 0x4)
1299 printk(KERN_DEBUG "IPPP: outbound frame filtered.\n"); 1298 printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
1300 kfree_skb(skb); 1299 kfree_skb(skb);
1301 goto unlock; 1300 goto unlock;
1302 } 1301 }
1303 if (!(ipt->active_filter 1302 if (!(ipt->active_filter
1304 && sk_run_filter(skb, ipt->active_filter, 1303 && sk_run_filter(skb, ipt->active_filter) == 0)) {
1305 ipt->active_len) == 0)) {
1306 if (ipt->debug & 0x4) 1304 if (ipt->debug & 0x4)
1307 printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n"); 1305 printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n");
1308 lp->huptimer = 0; 1306 lp->huptimer = 0;
@@ -1492,9 +1490,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
1492 } 1490 }
1493 1491
1494 drop |= is->pass_filter 1492 drop |= is->pass_filter
1495 && sk_run_filter(skb, is->pass_filter, is->pass_len) == 0; 1493 && sk_run_filter(skb, is->pass_filter) == 0;
1496 drop |= is->active_filter 1494 drop |= is->active_filter
1497 && sk_run_filter(skb, is->active_filter, is->active_len) == 0; 1495 && sk_run_filter(skb, is->active_filter) == 0;
1498 1496
1499 skb_push(skb, IPPP_MAX_HEADER - 4); 1497 skb_push(skb, IPPP_MAX_HEADER - 4);
1500 return drop; 1498 return drop;
@@ -1985,7 +1983,7 @@ isdn_ppp_dev_ioctl_stats(int slot, struct ifreq *ifr, struct net_device *dev)
1985{ 1983{
1986 struct ppp_stats __user *res = ifr->ifr_data; 1984 struct ppp_stats __user *res = ifr->ifr_data;
1987 struct ppp_stats t; 1985 struct ppp_stats t;
1988 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 1986 isdn_net_local *lp = netdev_priv(dev);
1989 1987
1990 if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats))) 1988 if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats)))
1991 return -EFAULT; 1989 return -EFAULT;
@@ -2024,7 +2022,7 @@ isdn_ppp_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2024{ 2022{
2025 int error=0; 2023 int error=0;
2026 int len; 2024 int len;
2027 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 2025 isdn_net_local *lp = netdev_priv(dev);
2028 2026
2029 2027
2030 if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP) 2028 if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP)
@@ -2091,7 +2089,7 @@ isdn_ppp_dial_slave(char *name)
2091 2089
2092 sdev = lp->slave; 2090 sdev = lp->slave;
2093 while (sdev) { 2091 while (sdev) {
2094 isdn_net_local *mlp = (isdn_net_local *) netdev_priv(sdev); 2092 isdn_net_local *mlp = netdev_priv(sdev);
2095 if (!(mlp->flags & ISDN_NET_CONNECTED)) 2093 if (!(mlp->flags & ISDN_NET_CONNECTED))
2096 break; 2094 break;
2097 sdev = mlp->slave; 2095 sdev = mlp->slave;
@@ -2099,7 +2097,7 @@ isdn_ppp_dial_slave(char *name)
2099 if (!sdev) 2097 if (!sdev)
2100 return 2; 2098 return 2;
2101 2099
2102 isdn_net_dial_req((isdn_net_local *) netdev_priv(sdev)); 2100 isdn_net_dial_req(netdev_priv(sdev));
2103 return 0; 2101 return 0;
2104#else 2102#else
2105 return -1; 2103 return -1;
@@ -2122,7 +2120,7 @@ isdn_ppp_hangup_slave(char *name)
2122 2120
2123 sdev = lp->slave; 2121 sdev = lp->slave;
2124 while (sdev) { 2122 while (sdev) {
2125 isdn_net_local *mlp = (isdn_net_local *) netdev_priv(sdev); 2123 isdn_net_local *mlp = netdev_priv(sdev);
2126 2124
2127 if (mlp->slave) { /* find last connected link in chain */ 2125 if (mlp->slave) { /* find last connected link in chain */
2128 isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp); 2126 isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp);
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index ac4aa18c632..5cc7c001c52 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -99,12 +99,16 @@ static void
99l1m_debug(struct FsmInst *fi, char *fmt, ...) 99l1m_debug(struct FsmInst *fi, char *fmt, ...)
100{ 100{
101 struct layer1 *l1 = fi->userdata; 101 struct layer1 *l1 = fi->userdata;
102 struct va_format vaf;
102 va_list va; 103 va_list va;
103 104
104 va_start(va, fmt); 105 va_start(va, fmt);
105 printk(KERN_DEBUG "%s: ", dev_name(&l1->dch->dev.dev)); 106
106 vprintk(fmt, va); 107 vaf.fmt = fmt;
107 printk("\n"); 108 vaf.va = &va;
109
110 printk(KERN_DEBUG "%s: %pV\n", dev_name(&l1->dch->dev.dev), &vaf);
111
108 va_end(va); 112 va_end(va);
109} 113}
110 114
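The three mISDN hunks (layer1.c here, layer2.c and tei.c below) replace the printk()/vprintk()/printk() triple with a single printk() that passes the caller's format and va_list through a struct va_format and the %pV extension, so the prefix and the message can no longer be interleaved with other console output. A minimal sketch of the pattern, assuming kernel context; the fsm_debug() wrapper and its prefix argument are hypothetical:

#include <linux/kernel.h>

static __attribute__((format(printf, 2, 3)))
void fsm_debug(const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list va;

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;
	/* one atomic line: prefix plus the formatted message via %pV */
	printk(KERN_DEBUG "%s: %pV\n", prefix, &vaf);
	va_end(va);
}
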
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index c9737178876..4ae75053c9d 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -95,14 +95,20 @@ static void
95l2m_debug(struct FsmInst *fi, char *fmt, ...) 95l2m_debug(struct FsmInst *fi, char *fmt, ...)
96{ 96{
97 struct layer2 *l2 = fi->userdata; 97 struct layer2 *l2 = fi->userdata;
98 struct va_format vaf;
98 va_list va; 99 va_list va;
99 100
100 if (!(*debug & DEBUG_L2_FSM)) 101 if (!(*debug & DEBUG_L2_FSM))
101 return; 102 return;
103
102 va_start(va, fmt); 104 va_start(va, fmt);
103 printk(KERN_DEBUG "l2 (sapi %d tei %d): ", l2->sapi, l2->tei); 105
104 vprintk(fmt, va); 106 vaf.fmt = fmt;
105 printk("\n"); 107 vaf.va = &va;
108
109 printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n",
110 l2->sapi, l2->tei, &vaf);
111
106 va_end(va); 112 va_end(va);
107} 113}
108 114
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index 1b85d9d2749..687c9b6264a 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -79,14 +79,19 @@ static void
79da_debug(struct FsmInst *fi, char *fmt, ...) 79da_debug(struct FsmInst *fi, char *fmt, ...)
80{ 80{
81 struct manager *mgr = fi->userdata; 81 struct manager *mgr = fi->userdata;
82 struct va_format vaf;
82 va_list va; 83 va_list va;
83 84
84 if (!(*debug & DEBUG_L2_TEIFSM)) 85 if (!(*debug & DEBUG_L2_TEIFSM))
85 return; 86 return;
87
86 va_start(va, fmt); 88 va_start(va, fmt);
87 printk(KERN_DEBUG "mgr(%d): ", mgr->ch.st->dev->id); 89
88 vprintk(fmt, va); 90 vaf.fmt = fmt;
89 printk("\n"); 91 vaf.va = &va;
92
93 printk(KERN_DEBUG "mgr(%d): %pV\n", mgr->ch.st->dev->id, &vaf);
94
90 va_end(va); 95 va_end(va);
91} 96}
92 97
@@ -223,14 +228,20 @@ static void
223tei_debug(struct FsmInst *fi, char *fmt, ...) 228tei_debug(struct FsmInst *fi, char *fmt, ...)
224{ 229{
225 struct teimgr *tm = fi->userdata; 230 struct teimgr *tm = fi->userdata;
231 struct va_format vaf;
226 va_list va; 232 va_list va;
227 233
228 if (!(*debug & DEBUG_L2_TEIFSM)) 234 if (!(*debug & DEBUG_L2_TEIFSM))
229 return; 235 return;
236
230 va_start(va, fmt); 237 va_start(va, fmt);
231 printk(KERN_DEBUG "sapi(%d) tei(%d): ", tm->l2->sapi, tm->l2->tei); 238
232 vprintk(fmt, va); 239 vaf.fmt = fmt;
233 printk("\n"); 240 vaf.va = &va;
241
242 printk(KERN_DEBUG "sapi(%d) tei(%d): %pV\n",
243 tm->l2->sapi, tm->l2->tei, &vaf);
244
234 va_end(va); 245 va_end(va);
235} 246}
236 247
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index ea9b7a098c9..475a66d95b3 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -201,7 +201,7 @@ struct net_local {
201#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */ 201#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
202#define RX_BUF_END (dev->mem_end - dev->mem_start) 202#define RX_BUF_END (dev->mem_end - dev->mem_start)
203 203
204#define TX_TIMEOUT 5 204#define TX_TIMEOUT (HZ/20)
205 205
206/* 206/*
207 That's it: only 86 bytes to set up the beast, including every extra 207 That's it: only 86 bytes to set up the beast, including every extra
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index cdf7226a7c4..d2bb4b254c5 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -98,7 +98,7 @@ static int rx_nocopy, rx_copy, queued_packet;
98#define WAIT_TX_AVAIL 200 98#define WAIT_TX_AVAIL 200
99 99
100/* Operational parameter that usually are not changed. */ 100/* Operational parameter that usually are not changed. */
101#define TX_TIMEOUT 40 /* Time in jiffies before concluding Tx hung */ 101#define TX_TIMEOUT ((4*HZ)/10) /* Time in jiffies before concluding Tx hung */
102 102
103/* The size here is somewhat misleading: the Corkscrew also uses the ISA 103/* The size here is somewhat misleading: the Corkscrew also uses the ISA
104 aliased registers at <base>+0x400. 104 aliased registers at <base>+0x400.
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index e2c9c5b949f..be1f1970c84 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -191,7 +191,7 @@ enum commands {
191#define RX_SUSPEND 0x0030 191#define RX_SUSPEND 0x0030
192#define RX_ABORT 0x0040 192#define RX_ABORT 0x0040
193 193
194#define TX_TIMEOUT 5 194#define TX_TIMEOUT (HZ/20)
195 195
196 196
197struct i596_reg { 197struct i596_reg {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a445a0492c5..a20693fcb32 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1533,7 +1533,7 @@ config E100
1533 1533
1534 <http://support.intel.com/support/network/adapter/pro100/21397.htm> 1534 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
1535 1535
1536 to identify the adapter. 1536 to identify the adapter.
1537 1537
1538 For the latest Intel PRO/100 network driver for Linux, see: 1538 For the latest Intel PRO/100 network driver for Linux, see:
1539 1539
@@ -1786,17 +1786,17 @@ config KS8842
1786 tristate "Micrel KSZ8841/42 with generic bus interface" 1786 tristate "Micrel KSZ8841/42 with generic bus interface"
1787 depends on HAS_IOMEM && DMA_ENGINE 1787 depends on HAS_IOMEM && DMA_ENGINE
1788 help 1788 help
1789 This platform driver is for KSZ8841(1-port) / KS8842(2-port) 1789 This platform driver is for KSZ8841(1-port) / KS8842(2-port)
1790 ethernet switch chip (managed, VLAN, QoS) from Micrel or 1790 ethernet switch chip (managed, VLAN, QoS) from Micrel or
1791 Timberdale(FPGA). 1791 Timberdale(FPGA).
1792 1792
1793config KS8851 1793config KS8851
1794 tristate "Micrel KS8851 SPI" 1794 tristate "Micrel KS8851 SPI"
1795 depends on SPI 1795 depends on SPI
1796 select MII 1796 select MII
1797 select CRC32 1797 select CRC32
1798 help 1798 help
1799 SPI driver for Micrel KS8851 SPI attached network chip. 1799 SPI driver for Micrel KS8851 SPI attached network chip.
1800 1800
1801config KS8851_MLL 1801config KS8851_MLL
1802 tristate "Micrel KS8851 MLL" 1802 tristate "Micrel KS8851 MLL"
@@ -2133,25 +2133,25 @@ config IP1000
2133 will be called ipg. This is recommended. 2133 will be called ipg. This is recommended.
2134 2134
2135config IGB 2135config IGB
2136 tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" 2136 tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
2137 depends on PCI 2137 depends on PCI
2138 ---help--- 2138 ---help---
2139 This driver supports Intel(R) 82575/82576 gigabit ethernet family of 2139 This driver supports Intel(R) 82575/82576 gigabit ethernet family of
2140 adapters. For more information on how to identify your adapter, go 2140 adapters. For more information on how to identify your adapter, go
2141 to the Adapter & Driver ID Guide at: 2141 to the Adapter & Driver ID Guide at:
2142 2142
2143 <http://support.intel.com/support/network/adapter/pro100/21397.htm> 2143 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
2144 2144
2145 For general information and support, go to the Intel support 2145 For general information and support, go to the Intel support
2146 website at: 2146 website at:
2147 2147
2148 <http://support.intel.com> 2148 <http://support.intel.com>
2149 2149
2150 More specific information on configuring the driver is in 2150 More specific information on configuring the driver is in
2151 <file:Documentation/networking/e1000.txt>. 2151 <file:Documentation/networking/e1000.txt>.
2152 2152
2153 To compile this driver as a module, choose M here. The module 2153 To compile this driver as a module, choose M here. The module
2154 will be called igb. 2154 will be called igb.
2155 2155
2156config IGB_DCA 2156config IGB_DCA
2157 bool "Direct Cache Access (DCA) Support" 2157 bool "Direct Cache Access (DCA) Support"
@@ -2163,25 +2163,25 @@ config IGB_DCA
2163 is used, with the intent of lessening the impact of cache misses. 2163 is used, with the intent of lessening the impact of cache misses.
2164 2164
2165config IGBVF 2165config IGBVF
2166 tristate "Intel(R) 82576 Virtual Function Ethernet support" 2166 tristate "Intel(R) 82576 Virtual Function Ethernet support"
2167 depends on PCI 2167 depends on PCI
2168 ---help--- 2168 ---help---
2169 This driver supports Intel(R) 82576 virtual functions. For more 2169 This driver supports Intel(R) 82576 virtual functions. For more
2170 information on how to identify your adapter, go to the Adapter & 2170 information on how to identify your adapter, go to the Adapter &
2171 Driver ID Guide at: 2171 Driver ID Guide at:
2172 2172
2173 <http://support.intel.com/support/network/adapter/pro100/21397.htm> 2173 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
2174 2174
2175 For general information and support, go to the Intel support 2175 For general information and support, go to the Intel support
2176 website at: 2176 website at:
2177 2177
2178 <http://support.intel.com> 2178 <http://support.intel.com>
2179 2179
2180 More specific information on configuring the driver is in 2180 More specific information on configuring the driver is in
2181 <file:Documentation/networking/e1000.txt>. 2181 <file:Documentation/networking/e1000.txt>.
2182 2182
2183 To compile this driver as a module, choose M here. The module 2183 To compile this driver as a module, choose M here. The module
2184 will be called igbvf. 2184 will be called igbvf.
2185 2185
2186source "drivers/net/ixp2000/Kconfig" 2186source "drivers/net/ixp2000/Kconfig"
2187 2187
@@ -2300,14 +2300,14 @@ config SKGE
2300 will be called skge. This is recommended. 2300 will be called skge. This is recommended.
2301 2301
2302config SKGE_DEBUG 2302config SKGE_DEBUG
2303 bool "Debugging interface" 2303 bool "Debugging interface"
2304 depends on SKGE && DEBUG_FS 2304 depends on SKGE && DEBUG_FS
2305 help 2305 help
2306 This option adds the ability to dump driver state for debugging. 2306 This option adds the ability to dump driver state for debugging.
2307 The file /sys/kernel/debug/skge/ethX displays the state of the internal 2307 The file /sys/kernel/debug/skge/ethX displays the state of the internal
2308 transmit and receive rings. 2308 transmit and receive rings.
2309 2309
2310 If unsure, say N. 2310 If unsure, say N.
2311 2311
2312config SKY2 2312config SKY2
2313 tristate "SysKonnect Yukon2 support" 2313 tristate "SysKonnect Yukon2 support"
@@ -2326,14 +2326,14 @@ config SKY2
2326 will be called sky2. This is recommended. 2326 will be called sky2. This is recommended.
2327 2327
2328config SKY2_DEBUG 2328config SKY2_DEBUG
2329 bool "Debugging interface" 2329 bool "Debugging interface"
2330 depends on SKY2 && DEBUG_FS 2330 depends on SKY2 && DEBUG_FS
2331 help 2331 help
2332 This option adds the ability to dump driver state for debugging. 2332 This option adds the ability to dump driver state for debugging.
2333 The file /sys/kernel/debug/sky2/ethX displays the state of the internal 2333 The file /sys/kernel/debug/sky2/ethX displays the state of the internal
2334 transmit and receive rings. 2334 transmit and receive rings.
2335 2335
2336 If unsure, say N. 2336 If unsure, say N.
2337 2337
2338config VIA_VELOCITY 2338config VIA_VELOCITY
2339 tristate "VIA Velocity support" 2339 tristate "VIA Velocity support"
@@ -2389,12 +2389,12 @@ config SPIDER_NET
2389 Cell Processor-Based Blades from IBM. 2389 Cell Processor-Based Blades from IBM.
2390 2390
2391config TSI108_ETH 2391config TSI108_ETH
2392 tristate "Tundra TSI108 gigabit Ethernet support" 2392 tristate "Tundra TSI108 gigabit Ethernet support"
2393 depends on TSI108_BRIDGE 2393 depends on TSI108_BRIDGE
2394 help 2394 help
2395 This driver supports Tundra TSI108 gigabit Ethernet ports. 2395 This driver supports Tundra TSI108 gigabit Ethernet ports.
2396 To compile this driver as a module, choose M here: the module 2396 To compile this driver as a module, choose M here: the module
2397 will be called tsi108_eth. 2397 will be called tsi108_eth.
2398 2398
2399config GELIC_NET 2399config GELIC_NET
2400 tristate "PS3 Gigabit Ethernet driver" 2400 tristate "PS3 Gigabit Ethernet driver"
@@ -2573,32 +2573,32 @@ config MDIO
2573 tristate 2573 tristate
2574 2574
2575config CHELSIO_T1 2575config CHELSIO_T1
2576 tristate "Chelsio 10Gb Ethernet support" 2576 tristate "Chelsio 10Gb Ethernet support"
2577 depends on PCI 2577 depends on PCI
2578 select CRC32 2578 select CRC32
2579 select MDIO 2579 select MDIO
2580 help 2580 help
2581 This driver supports Chelsio gigabit and 10-gigabit 2581 This driver supports Chelsio gigabit and 10-gigabit
2582 Ethernet cards. More information about adapter features and 2582 Ethernet cards. More information about adapter features and
2583 performance tuning is in <file:Documentation/networking/cxgb.txt>. 2583 performance tuning is in <file:Documentation/networking/cxgb.txt>.
2584 2584
2585 For general information about Chelsio and our products, visit 2585 For general information about Chelsio and our products, visit
2586 our website at <http://www.chelsio.com>. 2586 our website at <http://www.chelsio.com>.
2587 2587
2588 For customer support, please visit our customer support page at 2588 For customer support, please visit our customer support page at
2589 <http://www.chelsio.com/support.html>. 2589 <http://www.chelsio.com/support.html>.
2590 2590
2591 Please send feedback to <linux-bugs@chelsio.com>. 2591 Please send feedback to <linux-bugs@chelsio.com>.
2592 2592
2593 To compile this driver as a module, choose M here: the module 2593 To compile this driver as a module, choose M here: the module
2594 will be called cxgb. 2594 will be called cxgb.
2595 2595
2596config CHELSIO_T1_1G 2596config CHELSIO_T1_1G
2597 bool "Chelsio gigabit Ethernet support" 2597 bool "Chelsio gigabit Ethernet support"
2598 depends on CHELSIO_T1 2598 depends on CHELSIO_T1
2599 help 2599 help
2600 Enables support for Chelsio's gigabit Ethernet PCI cards. If you 2600 Enables support for Chelsio's gigabit Ethernet PCI cards. If you
2601 are using only 10G cards say 'N' here. 2601 are using only 10G cards say 'N' here.
2602 2602
2603config CHELSIO_T3_DEPENDS 2603config CHELSIO_T3_DEPENDS
2604 tristate 2604 tristate
@@ -2728,26 +2728,26 @@ config IXGBE_DCB
2728 If unsure, say N. 2728 If unsure, say N.
2729 2729
2730config IXGBEVF 2730config IXGBEVF
2731 tristate "Intel(R) 82599 Virtual Function Ethernet support" 2731 tristate "Intel(R) 82599 Virtual Function Ethernet support"
2732 depends on PCI_MSI 2732 depends on PCI_MSI
2733 ---help--- 2733 ---help---
2734 This driver supports Intel(R) 82599 virtual functions. For more 2734 This driver supports Intel(R) 82599 virtual functions. For more
2735 information on how to identify your adapter, go to the Adapter & 2735 information on how to identify your adapter, go to the Adapter &
2736 Driver ID Guide at: 2736 Driver ID Guide at:
2737 2737
2738 <http://support.intel.com/support/network/sb/CS-008441.htm> 2738 <http://support.intel.com/support/network/sb/CS-008441.htm>
2739 2739
2740 For general information and support, go to the Intel support 2740 For general information and support, go to the Intel support
2741 website at: 2741 website at:
2742 2742
2743 <http://support.intel.com> 2743 <http://support.intel.com>
2744 2744
2745 More specific information on configuring the driver is in 2745 More specific information on configuring the driver is in
2746 <file:Documentation/networking/ixgbevf.txt>. 2746 <file:Documentation/networking/ixgbevf.txt>.
2747 2747
2748 To compile this driver as a module, choose M here. The module 2748 To compile this driver as a module, choose M here. The module
2749 will be called ixgbevf. MSI-X interrupt support is required 2749 will be called ixgbevf. MSI-X interrupt support is required
2750 for this driver to work correctly. 2750 for this driver to work correctly.
2751 2751
2752config IXGB 2752config IXGB
2753 tristate "Intel(R) PRO/10GbE support" 2753 tristate "Intel(R) PRO/10GbE support"
@@ -2772,29 +2772,38 @@ config IXGB
2772 will be called ixgb. 2772 will be called ixgb.
2773 2773
2774config S2IO 2774config S2IO
2775 tristate "S2IO 10Gbe XFrame NIC" 2775 tristate "Exar Xframe 10Gb Ethernet Adapter"
2776 depends on PCI 2776 depends on PCI
2777 ---help--- 2777 ---help---
2778 This driver supports the 10Gbe XFrame NIC of S2IO. 2778 This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
2779
2779 More specific information on configuring the driver is in 2780 More specific information on configuring the driver is in
2780 <file:Documentation/networking/s2io.txt>. 2781 <file:Documentation/networking/s2io.txt>.
2781 2782
2783 To compile this driver as a module, choose M here. The module
2784 will be called s2io.
2785
2782config VXGE 2786config VXGE
2783 tristate "Neterion X3100 Series 10GbE PCIe Server Adapter" 2787 tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
2784 depends on PCI && INET 2788 depends on PCI && INET
2785 ---help--- 2789 ---help---
2786 This driver supports Neterion Inc's X3100 Series 10 GbE PCIe 2790 This driver supports Exar Corp's X3100 Series 10 GbE PCIe
2787 I/O Virtualized Server Adapter. 2791 I/O Virtualized Server Adapter.
2792
2788 More specific information on configuring the driver is in 2793 More specific information on configuring the driver is in
2789 <file:Documentation/networking/vxge.txt>. 2794 <file:Documentation/networking/vxge.txt>.
2790 2795
2796 To compile this driver as a module, choose M here. The module
2797 will be called vxge.
2798
2791config VXGE_DEBUG_TRACE_ALL 2799config VXGE_DEBUG_TRACE_ALL
2792 bool "Enabling All Debug trace statments in driver" 2800 bool "Enabling All Debug trace statments in driver"
2793 default n 2801 default n
2794 depends on VXGE 2802 depends on VXGE
2795 ---help--- 2803 ---help---
2796 Say Y here if you want to enabling all the debug trace statements in 2804 Say Y here if you want to enabling all the debug trace statements in
2797 driver. By default only few debug trace statements are enabled. 2805 the vxge driver. By default only few debug trace statements are
2806 enabled.
2798 2807
2799config MYRI10GE 2808config MYRI10GE
2800 tristate "Myricom Myri-10G Ethernet support" 2809 tristate "Myricom Myri-10G Ethernet support"
@@ -2906,18 +2915,18 @@ config QLGE
2906 will be called qlge. 2915 will be called qlge.
2907 2916
2908config BNA 2917config BNA
2909 tristate "Brocade 1010/1020 10Gb Ethernet Driver support" 2918 tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
2910 depends on PCI 2919 depends on PCI
2911 ---help--- 2920 ---help---
2912 This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet 2921 This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
2913 cards. 2922 cards.
2914 To compile this driver as a module, choose M here: the module 2923 To compile this driver as a module, choose M here: the module
2915 will be called bna. 2924 will be called bna.
2916 2925
2917 For general information and support, go to the Brocade support 2926 For general information and support, go to the Brocade support
2918 website at: 2927 website at:
2919 2928
2920 <http://support.brocade.com> 2929 <http://support.brocade.com>
2921 2930
2922source "drivers/net/sfc/Kconfig" 2931source "drivers/net/sfc/Kconfig"
2923 2932
@@ -3227,18 +3236,18 @@ config PPP_BSDCOMP
3227 modules once you have said "make modules". If unsure, say N. 3236 modules once you have said "make modules". If unsure, say N.
3228 3237
3229config PPP_MPPE 3238config PPP_MPPE
3230 tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)" 3239 tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
3231 depends on PPP && EXPERIMENTAL 3240 depends on PPP && EXPERIMENTAL
3232 select CRYPTO 3241 select CRYPTO
3233 select CRYPTO_SHA1 3242 select CRYPTO_SHA1
3234 select CRYPTO_ARC4 3243 select CRYPTO_ARC4
3235 select CRYPTO_ECB 3244 select CRYPTO_ECB
3236 ---help--- 3245 ---help---
3237 Support for the MPPE Encryption protocol, as employed by the 3246 Support for the MPPE Encryption protocol, as employed by the
3238 Microsoft Point-to-Point Tunneling Protocol. 3247 Microsoft Point-to-Point Tunneling Protocol.
3239 3248
3240 See http://pptpclient.sourceforge.net/ for information on 3249 See http://pptpclient.sourceforge.net/ for information on
3241 configuring PPTP clients and servers to utilize this method. 3250 configuring PPTP clients and servers to utilize this method.
3242 3251
3243config PPPOE 3252config PPPOE
3244 tristate "PPP over Ethernet (EXPERIMENTAL)" 3253 tristate "PPP over Ethernet (EXPERIMENTAL)"
@@ -3397,14 +3406,14 @@ config VIRTIO_NET
3397 depends on EXPERIMENTAL && VIRTIO 3406 depends on EXPERIMENTAL && VIRTIO
3398 ---help--- 3407 ---help---
3399 This is the virtual network driver for virtio. It can be used with 3408 This is the virtual network driver for virtio. It can be used with
3400 lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. 3409 lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
3401 3410
3402config VMXNET3 3411config VMXNET3
3403 tristate "VMware VMXNET3 ethernet driver" 3412 tristate "VMware VMXNET3 ethernet driver"
3404 depends on PCI && INET 3413 depends on PCI && INET
3405 help 3414 help
3406 This driver supports VMware's vmxnet3 virtual ethernet NIC. 3415 This driver supports VMware's vmxnet3 virtual ethernet NIC.
3407 To compile this driver as a module, choose M here: the 3416 To compile this driver as a module, choose M here: the
3408 module will be called vmxnet3. 3417 module will be called vmxnet3.
3409 3418
3410endif # NETDEVICES 3419endif # NETDEVICES
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 62f21106efe..0c9217f48b7 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -340,14 +340,6 @@ am79c961_close(struct net_device *dev)
340 return 0; 340 return 0;
341} 341}
342 342
343/*
344 * Get the current statistics.
345 */
346static struct net_device_stats *am79c961_getstats (struct net_device *dev)
347{
348 return &dev->stats;
349}
350
351static void am79c961_mc_hash(char *addr, unsigned short *hash) 343static void am79c961_mc_hash(char *addr, unsigned short *hash)
352{ 344{
353 if (addr[0] & 0x01) { 345 if (addr[0] & 0x01) {
@@ -665,7 +657,6 @@ static const struct net_device_ops am79c961_netdev_ops = {
665 .ndo_open = am79c961_open, 657 .ndo_open = am79c961_open,
666 .ndo_stop = am79c961_close, 658 .ndo_stop = am79c961_close,
667 .ndo_start_xmit = am79c961_sendpacket, 659 .ndo_start_xmit = am79c961_sendpacket,
668 .ndo_get_stats = am79c961_getstats,
669 .ndo_set_multicast_list = am79c961_setmulticastlist, 660 .ndo_set_multicast_list = am79c961_setmulticastlist,
670 .ndo_tx_timeout = am79c961_timeout, 661 .ndo_tx_timeout = am79c961_timeout,
671 .ndo_validate_addr = eth_validate_addr, 662 .ndo_validate_addr = eth_validate_addr,
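
The hunk above deletes am79c961_getstats(), which only returned &dev->stats, and drops the matching .ndo_get_stats entry. When .ndo_get_stats is left unset, the core's dev_get_stats() falls back to the device's built-in dev->stats counters, so the wrapper added nothing. A minimal sketch of the resulting ops table (a hypothetical "foo_" driver, not code from this patch):

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open	= foo_open,		/* hypothetical */
		.ndo_stop	= foo_close,		/* hypothetical */
		.ndo_start_xmit	= foo_start_xmit,	/* hypothetical */
		/* no .ndo_get_stats: dev_get_stats() falls back to the
		 * generic dev->stats counters */
	};
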
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 4545d5a06c2..bfea499a351 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -117,7 +117,7 @@
117#define TX_DESC_SIZE 10 117#define TX_DESC_SIZE 10
118#define MAX_RBUFF_SZ 0x600 118#define MAX_RBUFF_SZ 0x600
119#define MAX_TBUFF_SZ 0x600 119#define MAX_TBUFF_SZ 0x600
120#define TX_TIMEOUT 50 120#define TX_TIMEOUT (HZ/2)
121#define DELAY 1000 121#define DELAY 1000
122#define CAM0 0x0 122#define CAM0 0x0
123 123
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 89876897a6f..871b1633f54 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -150,7 +150,7 @@ struct net_local {
150#define PORT_OFFSET(o) (o) 150#define PORT_OFFSET(o) (o)
151 151
152 152
153#define TX_TIMEOUT 10 153#define TX_TIMEOUT (HZ/10)
154 154
155 155
156/* Index to functions, as function prototypes. */ 156/* Index to functions, as function prototypes. */
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 8cb27cb7bca..ce0091eb06f 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -116,7 +116,7 @@ MODULE_LICENSE("GPL");
116#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5) 116#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
117#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) 117#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
118 118
119#define TX_TIMEOUT 20 119#define TX_TIMEOUT (HZ/5)
120 120
121/* The LANCE Rx and Tx ring descriptors. */ 121/* The LANCE Rx and Tx ring descriptors. */
122struct lance_rx_head { 122struct lance_rx_head {
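
The three hunks above replace raw jiffy counts with HZ-based expressions. The watchdog timeout is measured in jiffies, so a literal such as 20 means a different wall-clock time depending on CONFIG_HZ, while HZ/5 always means 200 ms. A standalone illustration (ordinary userspace C, example HZ values only):

	/* Not kernel code: shows how a raw tick count drifts with HZ
	 * while HZ/5 stays at 200 ms. */
	#include <stdio.h>

	int main(void)
	{
		const int hz_values[] = { 100, 250, 1000 };

		for (unsigned i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
			int hz = hz_values[i];
			int raw = 20;		/* old style: "20 jiffies" */
			int scaled = hz / 5;	/* new style: HZ/5 */

			printf("HZ=%4d: raw 20 ticks = %4d ms, HZ/5 = %4d ms\n",
			       hz, raw * 1000 / hz, scaled * 1000 / hz);
		}
		return 0;
	}

With HZ=100 the two forms coincide, which is presumably why the original raw numbers were chosen; the HZ-based form keeps that intent on every tick rate.
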
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index b6da4cf3694..4bebff3faea 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -325,7 +325,7 @@ static void ax_block_output(struct net_device *dev, int count,
325static void 325static void
326ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len) 326ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
327{ 327{
328 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 328 struct ei_device *ei_local = netdev_priv(dev);
329 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR; 329 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
330 unsigned int memr; 330 unsigned int memr;
331 331
@@ -364,7 +364,7 @@ ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
364static unsigned int 364static unsigned int
365ax_phy_ei_inbits(struct net_device *dev, int no) 365ax_phy_ei_inbits(struct net_device *dev, int no)
366{ 366{
367 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 367 struct ei_device *ei_local = netdev_priv(dev);
368 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR; 368 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
369 unsigned int memr; 369 unsigned int memr;
370 unsigned int result = 0; 370 unsigned int result = 0;
@@ -412,7 +412,7 @@ ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
412static int 412static int
413ax_phy_read(struct net_device *dev, int phy_addr, int reg) 413ax_phy_read(struct net_device *dev, int phy_addr, int reg)
414{ 414{
415 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 415 struct ei_device *ei_local = netdev_priv(dev);
416 unsigned long flags; 416 unsigned long flags;
417 unsigned int result; 417 unsigned int result;
418 418
@@ -435,7 +435,7 @@ ax_phy_read(struct net_device *dev, int phy_addr, int reg)
435static void 435static void
436ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value) 436ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
437{ 437{
438 struct ei_device *ei = (struct ei_device *) netdev_priv(dev); 438 struct ei_device *ei = netdev_priv(dev);
439 struct ax_device *ax = to_ax_dev(dev); 439 struct ax_device *ax = to_ax_dev(dev);
440 unsigned long flags; 440 unsigned long flags;
441 441
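
These ax88796 hunks only drop redundant casts: netdev_priv() returns void *, which converts implicitly to any object pointer type in C. A standalone illustration (the fake_netdev_priv() helper below merely stands in for the real accessor):

	#include <stdio.h>

	struct ei_device { int msg_enable; };

	/* stand-in for netdev_priv(): returns the private area as void * */
	static void *fake_netdev_priv(void *blob) { return blob; }

	int main(void)
	{
		struct ei_device priv = { .msg_enable = 1 };
		struct ei_device *ei_local = fake_netdev_priv(&priv);	/* no cast needed */

		printf("msg_enable = %d\n", ei_local->msg_enable);
		return 0;
	}
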
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 4594a28b1f6..9cab32328bb 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -38,14 +38,17 @@
38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
40#define OC_NAME "Emulex OneConnect 10Gbps NIC" 40#define OC_NAME "Emulex OneConnect 10Gbps NIC"
41#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)" 41#define OC_NAME_BE OC_NAME "(be3)"
42#define OC_NAME_LANCER OC_NAME "(Lancer)"
42#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver" 43#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
43 44
44#define BE_VENDOR_ID 0x19a2 45#define BE_VENDOR_ID 0x19a2
46#define EMULEX_VENDOR_ID 0x10df
45#define BE_DEVICE_ID1 0x211 47#define BE_DEVICE_ID1 0x211
46#define BE_DEVICE_ID2 0x221 48#define BE_DEVICE_ID2 0x221
47#define OC_DEVICE_ID1 0x700 49#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
48#define OC_DEVICE_ID2 0x710 50#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
51#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
49 52
50static inline char *nic_name(struct pci_dev *pdev) 53static inline char *nic_name(struct pci_dev *pdev)
51{ 54{
@@ -53,7 +56,9 @@ static inline char *nic_name(struct pci_dev *pdev)
53 case OC_DEVICE_ID1: 56 case OC_DEVICE_ID1:
54 return OC_NAME; 57 return OC_NAME;
55 case OC_DEVICE_ID2: 58 case OC_DEVICE_ID2:
56 return OC_NAME1; 59 return OC_NAME_BE;
60 case OC_DEVICE_ID3:
61 return OC_NAME_LANCER;
57 case BE_DEVICE_ID2: 62 case BE_DEVICE_ID2:
58 return BE3_NAME; 63 return BE3_NAME;
59 default: 64 default:
@@ -149,6 +154,7 @@ struct be_eq_obj {
149 u16 min_eqd; /* in usecs */ 154 u16 min_eqd; /* in usecs */
150 u16 max_eqd; /* in usecs */ 155 u16 max_eqd; /* in usecs */
151 u16 cur_eqd; /* in usecs */ 156 u16 cur_eqd; /* in usecs */
157 u8 msix_vec_idx;
152 158
153 struct napi_struct napi; 159 struct napi_struct napi;
154}; 160};
@@ -214,7 +220,9 @@ struct be_rx_obj {
214 struct be_rx_stats stats; 220 struct be_rx_stats stats;
215 u8 rss_id; 221 u8 rss_id;
216 bool rx_post_starved; /* Zero rx frags have been posted to BE */ 222 bool rx_post_starved; /* Zero rx frags have been posted to BE */
217 u32 cache_line_barrier[16]; 223 u16 last_frag_index;
224 u16 rsvd;
225 u32 cache_line_barrier[15];
218}; 226};
219 227
220struct be_vf_cfg { 228struct be_vf_cfg {
@@ -260,6 +268,8 @@ struct be_adapter {
260 u32 num_rx_qs; 268 u32 num_rx_qs;
261 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 269 u32 big_page_size; /* Compounded page size shared by rx wrbs */
262 270
271 u8 msix_vec_next_idx;
272
263 struct vlan_group *vlan_grp; 273 struct vlan_group *vlan_grp;
264 u16 vlans_added; 274 u16 vlans_added;
265 u16 max_vlans; /* Number of vlans supported */ 275 u16 max_vlans; /* Number of vlans supported */
@@ -299,8 +309,8 @@ struct be_adapter {
299 309
300 bool sriov_enabled; 310 bool sriov_enabled;
301 struct be_vf_cfg vf_cfg[BE_MAX_VF]; 311 struct be_vf_cfg vf_cfg[BE_MAX_VF];
302 u8 base_eq_id;
303 u8 is_virtfn; 312 u8 is_virtfn;
313 u32 sli_family;
304}; 314};
305 315
306#define be_physfn(adapter) (!adapter->is_virtfn) 316#define be_physfn(adapter) (!adapter->is_virtfn)
@@ -309,6 +319,8 @@ struct be_adapter {
309#define BE_GEN2 2 319#define BE_GEN2 2
310#define BE_GEN3 3 320#define BE_GEN3 3
311 321
322#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3)
323
312extern const struct ethtool_ops be_ethtool_ops; 324extern const struct ethtool_ops be_ethtool_ops;
313 325
314#define tx_stats(adapter) (&adapter->tx_stats) 326#define tx_stats(adapter) (&adapter->tx_stats)
@@ -416,10 +428,17 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
416static inline void be_check_sriov_fn_type(struct be_adapter *adapter) 428static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
417{ 429{
418 u8 data; 430 u8 data;
419 431 u32 sli_intf;
420 pci_write_config_byte(adapter->pdev, 0xFE, 0xAA); 432
421 pci_read_config_byte(adapter->pdev, 0xFE, &data); 433 if (lancer_chip(adapter)) {
422 adapter->is_virtfn = (data != 0xAA); 434 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
435 &sli_intf);
436 adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
437 } else {
438 pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
439 pci_read_config_byte(adapter->pdev, 0xFE, &data);
440 adapter->is_virtfn = (data != 0xAA);
441 }
423} 442}
424 443
425static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) 444static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
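
The struct be_rx_obj change above trades one u32 of cache_line_barrier padding for the new last_frag_index/rsvd pair, so the structure's footprint stays the same. A simplified standalone check (only the tail fields are modelled; the structs here are illustrative, not the real be_rx_obj):

	#include <stdio.h>

	struct tail_old {
		unsigned int cache_line_barrier[16];
	};

	struct tail_new {
		unsigned short last_frag_index;
		unsigned short rsvd;
		unsigned int cache_line_barrier[15];
	};

	int main(void)
	{
		/* both print 64 bytes, so the padding still fills the line */
		printf("old tail: %zu bytes, new tail: %zu bytes\n",
		       sizeof(struct tail_old), sizeof(struct tail_new));
		return 0;
	}
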
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index e4465d222a7..171a08caf2b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -323,7 +323,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
323 323
324static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) 324static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
325{ 325{
326 u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); 326 u32 sem;
327
328 if (lancer_chip(adapter))
329 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
330 else
331 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
327 332
328 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; 333 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
329 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) 334 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
@@ -680,16 +685,36 @@ int be_cmd_cq_create(struct be_adapter *adapter,
680 OPCODE_COMMON_CQ_CREATE, sizeof(*req)); 685 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
681 686
682 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 687 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
688 if (lancer_chip(adapter)) {
689 req->hdr.version = 1;
690 req->page_size = 1; /* 1 for 4K */
691 AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
692 coalesce_wm);
693 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
694 no_delay);
695 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
696 __ilog2_u32(cq->len/256));
697 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
698 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
699 ctxt, 1);
700 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
701 ctxt, eq->id);
702 AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
703 } else {
704 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
705 coalesce_wm);
706 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
707 ctxt, no_delay);
708 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
709 __ilog2_u32(cq->len/256));
710 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
711 AMAP_SET_BITS(struct amap_cq_context_be, solevent,
712 ctxt, sol_evts);
713 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
714 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
715 AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
716 }
683 717
684 AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
685 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
686 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
687 __ilog2_u32(cq->len/256));
688 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
689 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
690 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
691 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
692 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
693 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 718 be_dws_cpu_to_le(ctxt, sizeof(req->context));
694 719
695 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 720 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -737,13 +762,27 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
737 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req)); 762 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
738 763
739 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 764 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
765 if (lancer_chip(adapter)) {
766 req->hdr.version = 1;
767 req->cq_id = cpu_to_le16(cq->id);
768
769 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
770 be_encoded_q_len(mccq->len));
771 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
772 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
773 ctxt, cq->id);
774 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
775 ctxt, 1);
776
777 } else {
778 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
779 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
780 be_encoded_q_len(mccq->len));
781 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
782 }
740 783
741 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
742 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
743 be_encoded_q_len(mccq->len));
744 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
745 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ 784 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
746 req->async_event_bitmap[0] |= 0x00000022; 785 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
747 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 786 be_dws_cpu_to_le(ctxt, sizeof(req->context));
748 787
749 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 788 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
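
In both the BE and Lancer branches above, the CQ ring length is programmed as log2(len/256), which is what __ilog2_u32(cq->len/256) computes. A standalone illustration of that encoding (ring lengths are examples; the local ilog2 helper just mirrors the kernel macro's result for powers of two):

	#include <stdio.h>

	static unsigned int ilog2_u32(unsigned int v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		const unsigned int lens[] = { 256, 512, 1024 };

		for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
			printf("cq->len = %4u  ->  count field = %u\n",
			       lens[i], ilog2_u32(lens[i] / 256));
		return 0;
	}
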
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 8469ff061f3..83d15c8a9fa 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -309,7 +309,7 @@ struct be_cmd_req_pmac_del {
309/******************** Create CQ ***************************/ 309/******************** Create CQ ***************************/
310/* Pseudo amap definition in which each bit of the actual structure is defined 310/* Pseudo amap definition in which each bit of the actual structure is defined
311 * as a byte: used to calculate offset/shift/mask of each field */ 311 * as a byte: used to calculate offset/shift/mask of each field */
312struct amap_cq_context { 312struct amap_cq_context_be {
313 u8 cidx[11]; /* dword 0*/ 313 u8 cidx[11]; /* dword 0*/
314 u8 rsvd0; /* dword 0*/ 314 u8 rsvd0; /* dword 0*/
315 u8 coalescwm[2]; /* dword 0*/ 315 u8 coalescwm[2]; /* dword 0*/
@@ -332,14 +332,32 @@ struct amap_cq_context {
332 u8 rsvd5[32]; /* dword 3*/ 332 u8 rsvd5[32]; /* dword 3*/
333} __packed; 333} __packed;
334 334
335struct amap_cq_context_lancer {
336 u8 rsvd0[12]; /* dword 0*/
337 u8 coalescwm[2]; /* dword 0*/
338 u8 nodelay; /* dword 0*/
339 u8 rsvd1[12]; /* dword 0*/
340 u8 count[2]; /* dword 0*/
341 u8 valid; /* dword 0*/
342 u8 rsvd2; /* dword 0*/
343 u8 eventable; /* dword 0*/
344 u8 eqid[16]; /* dword 1*/
345 u8 rsvd3[15]; /* dword 1*/
346 u8 armed; /* dword 1*/
347 u8 rsvd4[32]; /* dword 2*/
348 u8 rsvd5[32]; /* dword 3*/
349} __packed;
350
335struct be_cmd_req_cq_create { 351struct be_cmd_req_cq_create {
336 struct be_cmd_req_hdr hdr; 352 struct be_cmd_req_hdr hdr;
337 u16 num_pages; 353 u16 num_pages;
338 u16 rsvd0; 354 u8 page_size;
339 u8 context[sizeof(struct amap_cq_context) / 8]; 355 u8 rsvd0;
356 u8 context[sizeof(struct amap_cq_context_be) / 8];
340 struct phys_addr pages[8]; 357 struct phys_addr pages[8];
341} __packed; 358} __packed;
342 359
360
343struct be_cmd_resp_cq_create { 361struct be_cmd_resp_cq_create {
344 struct be_cmd_resp_hdr hdr; 362 struct be_cmd_resp_hdr hdr;
345 u16 cq_id; 363 u16 cq_id;
@@ -349,7 +367,7 @@ struct be_cmd_resp_cq_create {
349/******************** Create MCCQ ***************************/ 367/******************** Create MCCQ ***************************/
350/* Pseudo amap definition in which each bit of the actual structure is defined 368/* Pseudo amap definition in which each bit of the actual structure is defined
351 * as a byte: used to calculate offset/shift/mask of each field */ 369 * as a byte: used to calculate offset/shift/mask of each field */
352struct amap_mcc_context { 370struct amap_mcc_context_be {
353 u8 con_index[14]; 371 u8 con_index[14];
354 u8 rsvd0[2]; 372 u8 rsvd0[2];
355 u8 ring_size[4]; 373 u8 ring_size[4];
@@ -364,12 +382,23 @@ struct amap_mcc_context {
364 u8 rsvd2[32]; 382 u8 rsvd2[32];
365} __packed; 383} __packed;
366 384
385struct amap_mcc_context_lancer {
386 u8 async_cq_id[16];
387 u8 ring_size[4];
388 u8 rsvd0[12];
389 u8 rsvd1[31];
390 u8 valid;
391 u8 async_cq_valid[1];
392 u8 rsvd2[31];
393 u8 rsvd3[32];
394} __packed;
395
367struct be_cmd_req_mcc_create { 396struct be_cmd_req_mcc_create {
368 struct be_cmd_req_hdr hdr; 397 struct be_cmd_req_hdr hdr;
369 u16 num_pages; 398 u16 num_pages;
370 u16 rsvd0; 399 u16 cq_id;
371 u32 async_event_bitmap[1]; 400 u32 async_event_bitmap[1];
372 u8 context[sizeof(struct amap_mcc_context) / 8]; 401 u8 context[sizeof(struct amap_mcc_context_be) / 8];
373 struct phys_addr pages[8]; 402 struct phys_addr pages[8];
374} __packed; 403} __packed;
375 404
@@ -605,6 +634,7 @@ struct be_hw_stats {
605 struct be_rxf_stats rxf; 634 struct be_rxf_stats rxf;
606 u32 rsvd[48]; 635 u32 rsvd[48];
607 struct be_erx_stats erx; 636 struct be_erx_stats erx;
637 u32 rsvd1[6];
608}; 638};
609 639
610struct be_cmd_req_get_stats { 640struct be_cmd_req_get_stats {
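
The amap_*_context structs above follow the "pseudo amap" convention described in their comments: every bit of the hardware context becomes one u8 in the pseudo struct, so sizeof(struct amap_...)/8 recovers the real context size, which is how the req->context arrays are dimensioned. A standalone check with a made-up 4-dword layout (field names are invented):

	#include <stdio.h>

	struct amap_example {		/* 4 dwords = 128 bits, one u8 per bit */
		unsigned char field_a[14];
		unsigned char rsvd0[2];
		unsigned char ring_size[4];
		unsigned char rsvd1[12];
		unsigned char dword1[32];
		unsigned char dword2[32];
		unsigned char dword3[32];
	};

	int main(void)
	{
		printf("pseudo struct: %zu bytes -> real context: %zu bytes\n",
		       sizeof(struct amap_example), sizeof(struct amap_example) / 8);
		return 0;
	}
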
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index a2ec5df0d73..4096d977823 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -32,10 +32,12 @@
32#define MPU_EP_CONTROL 0 32#define MPU_EP_CONTROL 0
33 33
34/********** MPU semphore ******************/ 34/********** MPU semphore ******************/
35#define MPU_EP_SEMAPHORE_OFFSET 0xac 35#define MPU_EP_SEMAPHORE_OFFSET 0xac
36#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF 36#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
37#define EP_SEMAPHORE_POST_ERR_MASK 0x1 37#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
38#define EP_SEMAPHORE_POST_ERR_SHIFT 31 38#define EP_SEMAPHORE_POST_ERR_MASK 0x1
39#define EP_SEMAPHORE_POST_ERR_SHIFT 31
40
39/* MPU semphore POST stage values */ 41/* MPU semphore POST stage values */
40#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */ 42#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
41#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */ 43#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
@@ -66,6 +68,28 @@
66#define PCICFG_UE_STATUS_LOW_MASK 0xA8 68#define PCICFG_UE_STATUS_LOW_MASK 0xA8
67#define PCICFG_UE_STATUS_HI_MASK 0xAC 69#define PCICFG_UE_STATUS_HI_MASK 0xAC
68 70
71/******** SLI_INTF ***********************/
72#define SLI_INTF_REG_OFFSET 0x58
73#define SLI_INTF_VALID_MASK 0xE0000000
74#define SLI_INTF_VALID 0xC0000000
75#define SLI_INTF_HINT2_MASK 0x1F000000
76#define SLI_INTF_HINT2_SHIFT 24
77#define SLI_INTF_HINT1_MASK 0x00FF0000
78#define SLI_INTF_HINT1_SHIFT 16
79#define SLI_INTF_FAMILY_MASK 0x00000F00
80#define SLI_INTF_FAMILY_SHIFT 8
81#define SLI_INTF_IF_TYPE_MASK 0x0000F000
82#define SLI_INTF_IF_TYPE_SHIFT 12
83#define SLI_INTF_REV_MASK 0x000000F0
84#define SLI_INTF_REV_SHIFT 4
85#define SLI_INTF_FT_MASK 0x00000001
86
87
88/* SLI family */
89#define BE_SLI_FAMILY 0x0
90#define LANCER_A0_SLI_FAMILY 0xA
91
92
69/********* ISR0 Register offset **********/ 93/********* ISR0 Register offset **********/
70#define CEV_ISR0_OFFSET 0xC18 94#define CEV_ISR0_OFFSET 0xC18
71#define CEV_ISR_SIZE 4 95#define CEV_ISR_SIZE 4
@@ -73,6 +97,9 @@
73/********* Event Q door bell *************/ 97/********* Event Q door bell *************/
74#define DB_EQ_OFFSET DB_CQ_OFFSET 98#define DB_EQ_OFFSET DB_CQ_OFFSET
75#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */ 99#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
100#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
101#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
102
76/* Clear the interrupt for this eq */ 103/* Clear the interrupt for this eq */
77#define DB_EQ_CLR_SHIFT (9) /* bit 9 */ 104#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
78/* Must be 1 */ 105/* Must be 1 */
@@ -85,6 +112,10 @@
85/********* Compl Q door bell *************/ 112/********* Compl Q door bell *************/
86#define DB_CQ_OFFSET 0x120 113#define DB_CQ_OFFSET 0x120
87#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ 114#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
115#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
116#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
117 placing at 11-15 */
118
88/* Number of event entries processed */ 119/* Number of event entries processed */
89#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ 120#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
90/* Rearm bit */ 121/* Rearm bit */
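
The new SLI_INTF definitions above are consumed by masking and shifting the raw register value, as be_dev_family_check() and be_check_sriov_fn_type() do later in this diff. A standalone decode of an example value (the register contents below are invented; only the masks and shifts come from the hunk):

	#include <stdio.h>

	#define SLI_INTF_VALID_MASK	0xE0000000
	#define SLI_INTF_VALID		0xC0000000
	#define SLI_INTF_IF_TYPE_MASK	0x0000F000
	#define SLI_INTF_IF_TYPE_SHIFT	12
	#define SLI_INTF_FAMILY_MASK	0x00000F00
	#define SLI_INTF_FAMILY_SHIFT	8
	#define SLI_INTF_FT_MASK	0x00000001

	int main(void)
	{
		unsigned int sli_intf = 0xC0002A01;	/* example value only */

		printf("valid:   %s\n",
		       (sli_intf & SLI_INTF_VALID_MASK) == SLI_INTF_VALID ? "yes" : "no");
		printf("if_type: 0x%x\n",
		       (sli_intf & SLI_INTF_IF_TYPE_MASK) >> SLI_INTF_IF_TYPE_SHIFT);
		printf("family:  0x%x\n",
		       (sli_intf & SLI_INTF_FAMILY_MASK) >> SLI_INTF_FAMILY_SHIFT);
		printf("is VF:   %s\n", (sli_intf & SLI_INTF_FT_MASK) ? "yes" : "no");
		return 0;
	}
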
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 93354eee2cf..0b35e4a8bf1 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -41,6 +41,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44 { 0 } 45 { 0 }
45}; 46};
46MODULE_DEVICE_TABLE(pci, be_dev_ids); 47MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -188,6 +189,8 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
188{ 189{
189 u32 val = 0; 190 u32 val = 0;
190 val |= qid & DB_EQ_RING_ID_MASK; 191 val |= qid & DB_EQ_RING_ID_MASK;
192 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
193 DB_EQ_RING_ID_EXT_MASK_SHIFT);
191 194
192 if (adapter->eeh_err) 195 if (adapter->eeh_err)
193 return; 196 return;
@@ -205,6 +208,8 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
205{ 208{
206 u32 val = 0; 209 u32 val = 0;
207 val |= qid & DB_CQ_RING_ID_MASK; 210 val |= qid & DB_CQ_RING_ID_MASK;
211 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
212 DB_CQ_RING_ID_EXT_MASK_SHIFT);
208 213
209 if (adapter->eeh_err) 214 if (adapter->eeh_err)
210 return; 215 return;
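
The two doorbell helpers above now fold queue-id bits 9-13 (EQ) or 10-14 (CQ) into doorbell bits 11-15 through the new EXT masks and shifts, so ring ids wider than the base mask still reach the hardware. A standalone illustration for the EQ case (the qid is an example):

	#include <stdio.h>

	#define DB_EQ_RING_ID_MASK		0x1FF	/* bits 0-8 */
	#define DB_EQ_RING_ID_EXT_MASK		0x3e00	/* bits 9-13 */
	#define DB_EQ_RING_ID_EXT_MASK_SHIFT	2	/* place bits 9-13 at 11-15 */

	int main(void)
	{
		unsigned int qid = 0x2A5;	/* example queue id above 511 */
		unsigned int val = 0;

		val |= qid & DB_EQ_RING_ID_MASK;
		val |= (qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT;

		printf("qid 0x%x -> doorbell ring-id bits 0x%x\n", qid, val);
		return 0;
	}
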
@@ -404,7 +409,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
404} 409}
405 410
406/* Determine number of WRB entries needed to xmit data in an skb */ 411/* Determine number of WRB entries needed to xmit data in an skb */
407static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy) 412static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
413 bool *dummy)
408{ 414{
409 int cnt = (skb->len > skb->data_len); 415 int cnt = (skb->len > skb->data_len);
410 416
@@ -412,12 +418,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
412 418
413 /* to account for hdr wrb */ 419 /* to account for hdr wrb */
414 cnt++; 420 cnt++;
415 if (cnt & 1) { 421 if (lancer_chip(adapter) || !(cnt & 1)) {
422 *dummy = false;
423 } else {
416 /* add a dummy to make it an even num */ 424 /* add a dummy to make it an even num */
417 cnt++; 425 cnt++;
418 *dummy = true; 426 *dummy = true;
419 } else 427 }
420 *dummy = false;
421 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT); 428 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
422 return cnt; 429 return cnt;
423} 430}
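
The reworked wrb_cnt_for_skb() above keeps the rule that BE2/BE3 need an even number of WRBs per packet (an odd count gets a dummy WRB), while Lancer accepts the odd count as-is. A standalone rendering of just that counting rule (fragment counts are examples; "lancer" is a plain flag here, not the driver macro):

	#include <stdio.h>
	#include <stdbool.h>

	/* One WRB for the header, one per fragment, plus one for linear
	 * skb data when present (modelled by "has_linear"). */
	static unsigned int wrb_cnt(bool lancer, bool has_linear,
				    unsigned int nr_frags, bool *dummy)
	{
		unsigned int cnt = (has_linear ? 1 : 0) + nr_frags + 1 /* hdr */;

		if (lancer || !(cnt & 1)) {
			*dummy = false;
		} else {
			cnt++;			/* pad to an even count */
			*dummy = true;
		}
		return cnt;
	}

	int main(void)
	{
		bool dummy_be, dummy_lancer;
		unsigned int be = wrb_cnt(false, true, 1, &dummy_be);
		unsigned int lancer = wrb_cnt(true, true, 1, &dummy_lancer);

		printf("BE:     %u WRBs (dummy=%d)\n", be, dummy_be);
		printf("Lancer: %u WRBs (dummy=%d)\n", lancer, dummy_lancer);
		return 0;
	}
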
@@ -443,8 +450,18 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
443 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); 450 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
444 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, 451 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
445 hdr, skb_shinfo(skb)->gso_size); 452 hdr, skb_shinfo(skb)->gso_size);
446 if (skb_is_gso_v6(skb)) 453 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
447 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); 454 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
455 if (lancer_chip(adapter) && adapter->sli_family ==
456 LANCER_A0_SLI_FAMILY) {
457 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
458 if (is_tcp_pkt(skb))
459 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
460 tcpcs, hdr, 1);
461 else if (is_udp_pkt(skb))
462 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
463 udpcs, hdr, 1);
464 }
448 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 465 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
449 if (is_tcp_pkt(skb)) 466 if (is_tcp_pkt(skb))
450 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); 467 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -566,7 +583,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
566 u32 start = txq->head; 583 u32 start = txq->head;
567 bool dummy_wrb, stopped = false; 584 bool dummy_wrb, stopped = false;
568 585
569 wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb); 586 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
570 587
571 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb); 588 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
572 if (copied) { 589 if (copied) {
@@ -894,11 +911,17 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
894 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 911 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
895 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 912 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
896 913
897 for (i = 0; i < num_rcvd; i++) { 914 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
898 page_info = get_rx_page_info(adapter, rxo, rxq_idx); 915 if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
899 put_page(page_info->page); 916
900 memset(page_info, 0, sizeof(*page_info)); 917 rxo->last_frag_index = rxq_idx;
901 index_inc(&rxq_idx, rxq->len); 918
919 for (i = 0; i < num_rcvd; i++) {
920 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
921 put_page(page_info->page);
922 memset(page_info, 0, sizeof(*page_info));
923 index_inc(&rxq_idx, rxq->len);
924 }
902 } 925 }
903} 926}
904 927
@@ -999,9 +1022,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
999 u8 vtm; 1022 u8 vtm;
1000 1023
1001 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 1024 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1002 /* Is it a flush compl that has no data */
1003 if (unlikely(num_rcvd == 0))
1004 return;
1005 1025
1006 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); 1026 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
1007 if (unlikely(!skb)) { 1027 if (unlikely(!skb)) {
@@ -1035,7 +1055,8 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1035 return; 1055 return;
1036 } 1056 }
1037 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 1057 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1038 vid = swab16(vid); 1058 if (!lancer_chip(adapter))
1059 vid = swab16(vid);
1039 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); 1060 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1040 } else { 1061 } else {
1041 netif_receive_skb(skb); 1062 netif_receive_skb(skb);
@@ -1057,10 +1078,6 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1057 u8 pkt_type; 1078 u8 pkt_type;
1058 1079
1059 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 1080 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1060 /* Is it a flush compl that has no data */
1061 if (unlikely(num_rcvd == 0))
1062 return;
1063
1064 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 1081 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1065 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 1082 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1066 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 1083 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@@ -1113,7 +1130,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1113 napi_gro_frags(&eq_obj->napi); 1130 napi_gro_frags(&eq_obj->napi);
1114 } else { 1131 } else {
1115 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 1132 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1116 vid = swab16(vid); 1133 if (!lancer_chip(adapter))
1134 vid = swab16(vid);
1117 1135
1118 if (!adapter->vlan_grp || adapter->vlans_added == 0) 1136 if (!adapter->vlan_grp || adapter->vlans_added == 0)
1119 return; 1137 return;
@@ -1330,7 +1348,7 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1330 while ((rxcp = be_rx_compl_get(rxo)) != NULL) { 1348 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1331 be_rx_compl_discard(adapter, rxo, rxcp); 1349 be_rx_compl_discard(adapter, rxo, rxcp);
1332 be_rx_compl_reset(rxcp); 1350 be_rx_compl_reset(rxcp);
1333 be_cq_notify(adapter, rx_cq->id, true, 1); 1351 be_cq_notify(adapter, rx_cq->id, false, 1);
1334 } 1352 }
1335 1353
1336 /* Then free posted rx buffer that were not used */ 1354 /* Then free posted rx buffer that were not used */
@@ -1381,7 +1399,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
1381 sent_skb = sent_skbs[txq->tail]; 1399 sent_skb = sent_skbs[txq->tail];
1382 end_idx = txq->tail; 1400 end_idx = txq->tail;
1383 index_adv(&end_idx, 1401 index_adv(&end_idx,
1384 wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len); 1402 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1403 txq->len);
1385 be_tx_compl_process(adapter, end_idx); 1404 be_tx_compl_process(adapter, end_idx);
1386 } 1405 }
1387} 1406}
@@ -1476,7 +1495,9 @@ static int be_tx_queues_create(struct be_adapter *adapter)
1476 /* Ask BE to create Tx Event queue */ 1495 /* Ask BE to create Tx Event queue */
1477 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) 1496 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1478 goto tx_eq_free; 1497 goto tx_eq_free;
1479 adapter->base_eq_id = adapter->tx_eq.q.id; 1498
1499 adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1500
1480 1501
1481 /* Alloc TX eth compl queue */ 1502 /* Alloc TX eth compl queue */
1482 cq = &adapter->tx_obj.cq; 1503 cq = &adapter->tx_obj.cq;
@@ -1554,6 +1575,9 @@ static int be_rx_queues_create(struct be_adapter *adapter)
1554 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 1575 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1555 for_all_rx_queues(adapter, rxo, i) { 1576 for_all_rx_queues(adapter, rxo, i) {
1556 rxo->adapter = adapter; 1577 rxo->adapter = adapter;
1578 /* Init last_frag_index so that the frag index in the first
1579 * completion will never match */
1580 rxo->last_frag_index = 0xffff;
1557 rxo->rx_eq.max_eqd = BE_MAX_EQD; 1581 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1558 rxo->rx_eq.enable_aic = true; 1582 rxo->rx_eq.enable_aic = true;
1559 1583
@@ -1568,6 +1592,8 @@ static int be_rx_queues_create(struct be_adapter *adapter)
1568 if (rc) 1592 if (rc)
1569 goto err; 1593 goto err;
1570 1594
1595 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1596
1571 /* CQ */ 1597 /* CQ */
1572 cq = &rxo->cq; 1598 cq = &rxo->cq;
1573 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, 1599 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
@@ -1578,7 +1604,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
1578 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3); 1604 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1579 if (rc) 1605 if (rc)
1580 goto err; 1606 goto err;
1581
1582 /* Rx Q */ 1607 /* Rx Q */
1583 q = &rxo->q; 1608 q = &rxo->q;
1584 rc = be_queue_alloc(adapter, q, RX_Q_LEN, 1609 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
@@ -1611,29 +1636,45 @@ err:
1611 return -1; 1636 return -1;
1612} 1637}
1613 1638
1614/* There are 8 evt ids per func. Retruns the evt id's bit number */ 1639static bool event_peek(struct be_eq_obj *eq_obj)
1615static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1616{ 1640{
1617 return eq_id - adapter->base_eq_id; 1641 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1642 if (!eqe->evt)
1643 return false;
1644 else
1645 return true;
1618} 1646}
1619 1647
1620static irqreturn_t be_intx(int irq, void *dev) 1648static irqreturn_t be_intx(int irq, void *dev)
1621{ 1649{
1622 struct be_adapter *adapter = dev; 1650 struct be_adapter *adapter = dev;
1623 struct be_rx_obj *rxo; 1651 struct be_rx_obj *rxo;
1624 int isr, i; 1652 int isr, i, tx = 0 , rx = 0;
1625 1653
1626 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + 1654 if (lancer_chip(adapter)) {
1627 (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE); 1655 if (event_peek(&adapter->tx_eq))
1628 if (!isr) 1656 tx = event_handle(adapter, &adapter->tx_eq);
1629 return IRQ_NONE; 1657 for_all_rx_queues(adapter, rxo, i) {
1658 if (event_peek(&rxo->rx_eq))
1659 rx |= event_handle(adapter, &rxo->rx_eq);
1660 }
1630 1661
1631 if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr)) 1662 if (!(tx || rx))
1632 event_handle(adapter, &adapter->tx_eq); 1663 return IRQ_NONE;
1633 1664
1634 for_all_rx_queues(adapter, rxo, i) { 1665 } else {
1635 if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr)) 1666 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1636 event_handle(adapter, &rxo->rx_eq); 1667 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1668 if (!isr)
1669 return IRQ_NONE;
1670
1671 if ((1 << adapter->tx_eq.msix_vec_idx & isr))
1672 event_handle(adapter, &adapter->tx_eq);
1673
1674 for_all_rx_queues(adapter, rxo, i) {
1675 if ((1 << rxo->rx_eq.msix_vec_idx & isr))
1676 event_handle(adapter, &rxo->rx_eq);
1677 }
1637 } 1678 }
1638 1679
1639 return IRQ_HANDLED; 1680 return IRQ_HANDLED;
@@ -1658,10 +1699,9 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1658 return IRQ_HANDLED; 1699 return IRQ_HANDLED;
1659} 1700}
1660 1701
1661static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo, 1702static inline bool do_gro(struct be_rx_obj *rxo,
1662 struct be_eth_rx_compl *rxcp) 1703 struct be_eth_rx_compl *rxcp, u8 err)
1663{ 1704{
1664 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1665 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); 1705 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1666 1706
1667 if (err) 1707 if (err)
@@ -1678,6 +1718,8 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1678 struct be_queue_info *rx_cq = &rxo->cq; 1718 struct be_queue_info *rx_cq = &rxo->cq;
1679 struct be_eth_rx_compl *rxcp; 1719 struct be_eth_rx_compl *rxcp;
1680 u32 work_done; 1720 u32 work_done;
1721 u16 frag_index, num_rcvd;
1722 u8 err;
1681 1723
1682 rxo->stats.rx_polls++; 1724 rxo->stats.rx_polls++;
1683 for (work_done = 0; work_done < budget; work_done++) { 1725 for (work_done = 0; work_done < budget; work_done++) {
@@ -1685,10 +1727,22 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1685 if (!rxcp) 1727 if (!rxcp)
1686 break; 1728 break;
1687 1729
1688 if (do_gro(adapter, rxo, rxcp)) 1730 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1689 be_rx_compl_process_gro(adapter, rxo, rxcp); 1731 frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
1690 else 1732 rxcp);
1691 be_rx_compl_process(adapter, rxo, rxcp); 1733 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1734 rxcp);
1735
1736 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
1737 if (likely(frag_index != rxo->last_frag_index &&
1738 num_rcvd != 0)) {
1739 rxo->last_frag_index = frag_index;
1740
1741 if (do_gro(rxo, rxcp, err))
1742 be_rx_compl_process_gro(adapter, rxo, rxcp);
1743 else
1744 be_rx_compl_process(adapter, rxo, rxcp);
1745 }
1692 1746
1693 be_rx_compl_reset(rxcp); 1747 be_rx_compl_reset(rxcp);
1694 } 1748 }
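
The RX poll loop above now drops Lancer "out-of-buffer" completions (a repeat of the previous fragment index) and BE flush completions (num_rcvd == 0) with a single test; last_frag_index starts at 0xffff so the first real completion always passes. A standalone walk-through of that filter (the completion stream below is invented):

	#include <stdio.h>

	struct compl { unsigned short frag_index; unsigned short num_rcvd; };

	int main(void)
	{
		/* invented sequence: normal, repeated index, flush, normal */
		struct compl stream[] = {
			{ 10, 2 }, { 10, 2 }, { 12, 0 }, { 12, 3 },
		};
		unsigned short last_frag_index = 0xffff;	/* never matches a real index */

		for (unsigned i = 0; i < sizeof(stream) / sizeof(stream[0]); i++) {
			struct compl *c = &stream[i];

			if (c->frag_index != last_frag_index && c->num_rcvd != 0) {
				last_frag_index = c->frag_index;
				printf("compl %u: process %u frags at index %u\n",
				       i, c->num_rcvd, c->frag_index);
			} else {
				printf("compl %u: skipped\n", i);
			}
		}
		return 0;
	}
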
@@ -1830,8 +1884,7 @@ static void be_worker(struct work_struct *work)
1830 be_post_rx_frags(rxo); 1884 be_post_rx_frags(rxo);
1831 } 1885 }
1832 } 1886 }
1833 1887 if (!adapter->ue_detected && !lancer_chip(adapter))
1834 if (!adapter->ue_detected)
1835 be_detect_dump_ue(adapter); 1888 be_detect_dump_ue(adapter);
1836 1889
1837reschedule: 1890reschedule:
@@ -1910,10 +1963,10 @@ static void be_sriov_disable(struct be_adapter *adapter)
1910#endif 1963#endif
1911} 1964}
1912 1965
1913static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id) 1966static inline int be_msix_vec_get(struct be_adapter *adapter,
1967 struct be_eq_obj *eq_obj)
1914{ 1968{
1915 return adapter->msix_entries[ 1969 return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1916 be_evt_bit_get(adapter, eq_id)].vector;
1917} 1970}
1918 1971
1919static int be_request_irq(struct be_adapter *adapter, 1972static int be_request_irq(struct be_adapter *adapter,
@@ -1924,14 +1977,14 @@ static int be_request_irq(struct be_adapter *adapter,
1924 int vec; 1977 int vec;
1925 1978
1926 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc); 1979 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1927 vec = be_msix_vec_get(adapter, eq_obj->q.id); 1980 vec = be_msix_vec_get(adapter, eq_obj);
1928 return request_irq(vec, handler, 0, eq_obj->desc, context); 1981 return request_irq(vec, handler, 0, eq_obj->desc, context);
1929} 1982}
1930 1983
1931static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj, 1984static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1932 void *context) 1985 void *context)
1933{ 1986{
1934 int vec = be_msix_vec_get(adapter, eq_obj->q.id); 1987 int vec = be_msix_vec_get(adapter, eq_obj);
1935 free_irq(vec, context); 1988 free_irq(vec, context);
1936} 1989}
1937 1990
@@ -2036,14 +2089,15 @@ static int be_close(struct net_device *netdev)
2036 netif_carrier_off(netdev); 2089 netif_carrier_off(netdev);
2037 adapter->link_up = false; 2090 adapter->link_up = false;
2038 2091
2039 be_intr_set(adapter, false); 2092 if (!lancer_chip(adapter))
2093 be_intr_set(adapter, false);
2040 2094
2041 if (adapter->msix_enabled) { 2095 if (adapter->msix_enabled) {
2042 vec = be_msix_vec_get(adapter, tx_eq->q.id); 2096 vec = be_msix_vec_get(adapter, tx_eq);
2043 synchronize_irq(vec); 2097 synchronize_irq(vec);
2044 2098
2045 for_all_rx_queues(adapter, rxo, i) { 2099 for_all_rx_queues(adapter, rxo, i) {
2046 vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id); 2100 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2047 synchronize_irq(vec); 2101 synchronize_irq(vec);
2048 } 2102 }
2049 } else { 2103 } else {
@@ -2082,7 +2136,8 @@ static int be_open(struct net_device *netdev)
2082 2136
2083 be_irq_register(adapter); 2137 be_irq_register(adapter);
2084 2138
2085 be_intr_set(adapter, true); 2139 if (!lancer_chip(adapter))
2140 be_intr_set(adapter, true);
2086 2141
2087 /* The evt queues are created in unarmed state; arm them */ 2142 /* The evt queues are created in unarmed state; arm them */
2088 for_all_rx_queues(adapter, rxo, i) { 2143 for_all_rx_queues(adapter, rxo, i) {
@@ -2543,10 +2598,15 @@ static void be_netdev_init(struct net_device *netdev)
2543 int i; 2598 int i;
2544 2599
2545 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 2600 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2546 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | 2601 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2602 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2547 NETIF_F_GRO | NETIF_F_TSO6; 2603 NETIF_F_GRO | NETIF_F_TSO6;
2548 2604
2549 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; 2605 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2606 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2607
2608 if (lancer_chip(adapter))
2609 netdev->vlan_features |= NETIF_F_TSO6;
2550 2610
2551 netdev->flags |= IFF_MULTICAST; 2611 netdev->flags |= IFF_MULTICAST;
2552 2612
@@ -2587,6 +2647,15 @@ static int be_map_pci_bars(struct be_adapter *adapter)
2587 u8 __iomem *addr; 2647 u8 __iomem *addr;
2588 int pcicfg_reg, db_reg; 2648 int pcicfg_reg, db_reg;
2589 2649
2650 if (lancer_chip(adapter)) {
2651 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2652 pci_resource_len(adapter->pdev, 0));
2653 if (addr == NULL)
2654 return -ENOMEM;
2655 adapter->db = addr;
2656 return 0;
2657 }
2658
2590 if (be_physfn(adapter)) { 2659 if (be_physfn(adapter)) {
2591 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), 2660 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2592 pci_resource_len(adapter->pdev, 2)); 2661 pci_resource_len(adapter->pdev, 2));
@@ -2783,6 +2852,44 @@ static int be_get_config(struct be_adapter *adapter)
2783 return 0; 2852 return 0;
2784} 2853}
2785 2854
2855static int be_dev_family_check(struct be_adapter *adapter)
2856{
2857 struct pci_dev *pdev = adapter->pdev;
2858 u32 sli_intf = 0, if_type;
2859
2860 switch (pdev->device) {
2861 case BE_DEVICE_ID1:
2862 case OC_DEVICE_ID1:
2863 adapter->generation = BE_GEN2;
2864 break;
2865 case BE_DEVICE_ID2:
2866 case OC_DEVICE_ID2:
2867 adapter->generation = BE_GEN3;
2868 break;
2869 case OC_DEVICE_ID3:
2870 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2871 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2872 SLI_INTF_IF_TYPE_SHIFT;
2873
2874 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2875 if_type != 0x02) {
2876 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2877 return -EINVAL;
2878 }
2879 if (num_vfs > 0) {
2880 dev_err(&pdev->dev, "VFs not supported\n");
2881 return -EINVAL;
2882 }
2883 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2884 SLI_INTF_FAMILY_SHIFT);
2885 adapter->generation = BE_GEN3;
2886 break;
2887 default:
2888 adapter->generation = 0;
2889 }
2890 return 0;
2891}
2892
2786static int __devinit be_probe(struct pci_dev *pdev, 2893static int __devinit be_probe(struct pci_dev *pdev,
2787 const struct pci_device_id *pdev_id) 2894 const struct pci_device_id *pdev_id)
2788{ 2895{
@@ -2805,22 +2912,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
2805 goto rel_reg; 2912 goto rel_reg;
2806 } 2913 }
2807 adapter = netdev_priv(netdev); 2914 adapter = netdev_priv(netdev);
2808
2809 switch (pdev->device) {
2810 case BE_DEVICE_ID1:
2811 case OC_DEVICE_ID1:
2812 adapter->generation = BE_GEN2;
2813 break;
2814 case BE_DEVICE_ID2:
2815 case OC_DEVICE_ID2:
2816 adapter->generation = BE_GEN3;
2817 break;
2818 default:
2819 adapter->generation = 0;
2820 }
2821
2822 adapter->pdev = pdev; 2915 adapter->pdev = pdev;
2823 pci_set_drvdata(pdev, adapter); 2916 pci_set_drvdata(pdev, adapter);
2917
2918 status = be_dev_family_check(adapter);
2919 if (status)
2920 goto free_netdev;
2921
2824 adapter->netdev = netdev; 2922 adapter->netdev = netdev;
2825 SET_NETDEV_DEV(netdev, &pdev->dev); 2923 SET_NETDEV_DEV(netdev, &pdev->dev);
2826 2924
@@ -2895,7 +2993,7 @@ ctrl_clean:
2895 be_ctrl_cleanup(adapter); 2993 be_ctrl_cleanup(adapter);
2896free_netdev: 2994free_netdev:
2897 be_sriov_disable(adapter); 2995 be_sriov_disable(adapter);
2898 free_netdev(adapter->netdev); 2996 free_netdev(netdev);
2899 pci_set_drvdata(pdev, NULL); 2997 pci_set_drvdata(pdev, NULL);
2900rel_reg: 2998rel_reg:
2901 pci_release_regions(pdev); 2999 pci_release_regions(pdev);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 062600be073..03209a37883 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
56#include "bnx2_fw.h" 56#include "bnx2_fw.h"
57 57
58#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
59#define DRV_MODULE_VERSION "2.0.18" 59#define DRV_MODULE_VERSION "2.0.20"
60#define DRV_MODULE_RELDATE "Oct 7, 2010" 60#define DRV_MODULE_RELDATE "Nov 24, 2010"
61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw" 61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw"
62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" 62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw" 63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw"
@@ -766,13 +766,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
766 int j; 766 int j;
767 767
768 rxr->rx_buf_ring = 768 rxr->rx_buf_ring =
769 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); 769 vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
770 if (rxr->rx_buf_ring == NULL) 770 if (rxr->rx_buf_ring == NULL)
771 return -ENOMEM; 771 return -ENOMEM;
772 772
773 memset(rxr->rx_buf_ring, 0,
774 SW_RXBD_RING_SIZE * bp->rx_max_ring);
775
776 for (j = 0; j < bp->rx_max_ring; j++) { 773 for (j = 0; j < bp->rx_max_ring; j++) {
777 rxr->rx_desc_ring[j] = 774 rxr->rx_desc_ring[j] =
778 dma_alloc_coherent(&bp->pdev->dev, 775 dma_alloc_coherent(&bp->pdev->dev,
@@ -785,13 +782,11 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
785 } 782 }
786 783
787 if (bp->rx_pg_ring_size) { 784 if (bp->rx_pg_ring_size) {
788 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE * 785 rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
789 bp->rx_max_pg_ring); 786 bp->rx_max_pg_ring);
790 if (rxr->rx_pg_ring == NULL) 787 if (rxr->rx_pg_ring == NULL)
791 return -ENOMEM; 788 return -ENOMEM;
792 789
793 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
794 bp->rx_max_pg_ring);
795 } 790 }
796 791
797 for (j = 0; j < bp->rx_max_pg_ring; j++) { 792 for (j = 0; j < bp->rx_max_pg_ring; j++) {
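
Both ring allocations above move from vmalloc() plus memset() to vzalloc(), which allocates and zeroes in one call. A hedged sketch of the before/after pattern (helper names are made up; this is not code from the driver):

	#include <linux/vmalloc.h>
	#include <linux/string.h>

	/* old pattern: allocate, then clear */
	static void *alloc_ring_old(unsigned long size)
	{
		void *ring = vmalloc(size);

		if (ring)
			memset(ring, 0, size);
		return ring;
	}

	/* new pattern: vzalloc() allocates and zeroes in one step */
	static void *alloc_ring_new(unsigned long size)
	{
		return vzalloc(size);
	}
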
@@ -4645,13 +4640,28 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4645 4640
4646 /* Wait for the current PCI transaction to complete before 4641 /* Wait for the current PCI transaction to complete before
4647 * issuing a reset. */ 4642 * issuing a reset. */
4648 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, 4643 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4649 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 4644 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4650 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 4645 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4651 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 4646 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4652 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 4647 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4653 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); 4648 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4654 udelay(5); 4649 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4650 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4651 udelay(5);
4652 } else { /* 5709 */
4653 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4654 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4655 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4656 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4657
4658 for (i = 0; i < 100; i++) {
4659 msleep(1);
4660 val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4661 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4662 break;
4663 }
4664 }
4655 4665
4656 /* Wait for the firmware to tell us it is ok to issue a reset. */ 4666 /* Wait for the firmware to tell us it is ok to issue a reset. */
4657 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); 4667 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
@@ -4673,7 +4683,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4673 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 4683 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4674 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 4684 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4675 4685
4676 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val); 4686 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4677 4687
4678 } else { 4688 } else {
4679 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4689 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@@ -7914,15 +7924,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7914 goto err_out_release; 7924 goto err_out_release;
7915 } 7925 }
7916 7926
7927 bnx2_set_power_state(bp, PCI_D0);
7928
7917 /* Configure byte swap and enable write to the reg_window registers. 7929 /* Configure byte swap and enable write to the reg_window registers.
7918 * Rely on CPU to do target byte swapping on big endian systems 7930 * Rely on CPU to do target byte swapping on big endian systems
7919 * The chip's target access swapping will not swap all accesses 7931 * The chip's target access swapping will not swap all accesses
7920 */ 7932 */
7921 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, 7933 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
7922 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 7934 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7923 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); 7935 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7924
7925 bnx2_set_power_state(bp, PCI_D0);
7926 7936
7927 bp->chip_id = REG_RD(bp, BNX2_MISC_ID); 7937 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7928 7938
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index bf4c3421067..5488a2e82fe 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -461,6 +461,8 @@ struct l2_fhdr {
461#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090 461#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090
462#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094 462#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094
463 463
464#define BNX2_PCICFG_DEVICE_CONTROL 0x000000b4
465#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND ((1L<<5)<<16)
464 466
465/* 467/*
466 * pci_reg definition 468 * pci_reg definition
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 863e73a85fb..cfc25cf064d 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
20 * (you will need to reboot afterwards) */ 20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 21/* #define BNX2X_STOP_ON_ERROR */
22 22
23#define DRV_MODULE_VERSION "1.60.00-4" 23#define DRV_MODULE_VERSION "1.60.00-6"
24#define DRV_MODULE_RELDATE "2010/11/01" 24#define DRV_MODULE_RELDATE "2010/11/29"
25#define BNX2X_BC_VER 0x040200 25#define BNX2X_BC_VER 0x040200
26 26
27#define BNX2X_MULTI_QUEUE 27#define BNX2X_MULTI_QUEUE
@@ -671,6 +671,10 @@ enum {
671 CAM_ISCSI_ETH_LINE, 671 CAM_ISCSI_ETH_LINE,
672 CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE 672 CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
673}; 673};
674/* number of MACs per function in NIG memory - used for SI mode */
675#define NIG_LLH_FUNC_MEM_SIZE 16
676/* number of entries in NIG_REG_LLHX_FUNC_MEM */
677#define NIG_LLH_FUNC_MEM_MAX_OFFSET 8
674 678
675#define BNX2X_VF_ID_INVALID 0xFF 679#define BNX2X_VF_ID_INVALID 0xFF
676 680
@@ -967,6 +971,8 @@ struct bnx2x {
967 u16 mf_ov; 971 u16 mf_ov;
968 u8 mf_mode; 972 u8 mf_mode;
969#define IS_MF(bp) (bp->mf_mode != 0) 973#define IS_MF(bp) (bp->mf_mode != 0)
974#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
975#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
970 976
971 u8 wol; 977 u8 wol;
972 978
@@ -1010,6 +1016,7 @@ struct bnx2x {
1010#define BNX2X_ACCEPT_ALL_UNICAST 0x0004 1016#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
1011#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008 1017#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
1012#define BNX2X_ACCEPT_BROADCAST 0x0010 1018#define BNX2X_ACCEPT_BROADCAST 0x0010
1019#define BNX2X_ACCEPT_UNMATCHED_UCAST 0x0020
1013#define BNX2X_PROMISCUOUS_MODE 0x10000 1020#define BNX2X_PROMISCUOUS_MODE 0x10000
1014 1021
1015 u32 rx_mode; 1022 u32 rx_mode;
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 94d5f59d5a6..a4555edbe9c 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -698,6 +698,29 @@ void bnx2x_release_phy_lock(struct bnx2x *bp)
698 mutex_unlock(&bp->port.phy_mutex); 698 mutex_unlock(&bp->port.phy_mutex);
699} 699}
700 700
701/* calculates MF speed according to current linespeed and MF configuration */
702u16 bnx2x_get_mf_speed(struct bnx2x *bp)
703{
704 u16 line_speed = bp->link_vars.line_speed;
705 if (IS_MF(bp)) {
706 u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
707 FUNC_MF_CFG_MAX_BW_MASK) >>
708 FUNC_MF_CFG_MAX_BW_SHIFT;
709 /* Calculate the current MAX line speed limit for the DCC
710 * capable devices
711 */
712 if (IS_MF_SD(bp)) {
713 u16 vn_max_rate = maxCfg * 100;
714
715 if (vn_max_rate < line_speed)
716 line_speed = vn_max_rate;
717 } else /* IS_MF_SI(bp)) */
718 line_speed = (line_speed * maxCfg) / 100;
719 }
720
721 return line_speed;
722}
723
701void bnx2x_link_report(struct bnx2x *bp) 724void bnx2x_link_report(struct bnx2x *bp)
702{ 725{
703 if (bp->flags & MF_FUNC_DIS) { 726 if (bp->flags & MF_FUNC_DIS) {
@@ -713,17 +736,8 @@ void bnx2x_link_report(struct bnx2x *bp)
713 netif_carrier_on(bp->dev); 736 netif_carrier_on(bp->dev);
714 netdev_info(bp->dev, "NIC Link is Up, "); 737 netdev_info(bp->dev, "NIC Link is Up, ");
715 738
716 line_speed = bp->link_vars.line_speed; 739 line_speed = bnx2x_get_mf_speed(bp);
717 if (IS_MF(bp)) {
718 u16 vn_max_rate;
719 740
720 vn_max_rate =
721 ((bp->mf_config[BP_VN(bp)] &
722 FUNC_MF_CFG_MAX_BW_MASK) >>
723 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
724 if (vn_max_rate < line_speed)
725 line_speed = vn_max_rate;
726 }
727 pr_cont("%d Mbps ", line_speed); 741 pr_cont("%d Mbps ", line_speed);
728 742
729 if (bp->link_vars.duplex == DUPLEX_FULL) 743 if (bp->link_vars.duplex == DUPLEX_FULL)
@@ -1692,11 +1706,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1692 } 1706 }
1693 } 1707 }
1694 1708
1695 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 1709 if (skb_is_gso_v6(skb))
1696 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); 1710 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1697 1711 else if (skb_is_gso(skb))
1698 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 1712 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1699 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1700 1713
1701 return rc; 1714 return rc;
1702} 1715}
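
bnx2x_get_mf_speed(), added above, applies two different rules: in switch-dependent (SD) mode the configured maximum (maxCfg, in units of 100 Mbps) caps the line speed, while in switch-independent (SI) mode it scales the line speed as a percentage. A standalone illustration with example numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned short line_speed = 10000;	/* 10G link, in Mbps */
		unsigned short maxCfg = 25;		/* from FUNC_MF_CFG_MAX_BW */
		unsigned short sd = maxCfg * 100;	/* SD: absolute cap, 2500 Mbps */
		unsigned short si;

		if (sd > line_speed)
			sd = line_speed;

		si = (line_speed * maxCfg) / 100;	/* SI: 25% of 10G = 2500 Mbps */

		printf("SD mode: %u Mbps, SI mode: %u Mbps\n", sd, si);
		return 0;
	}
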
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 6b28739c530..cb8f2a040a1 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -73,6 +73,16 @@ void bnx2x__link_status_update(struct bnx2x *bp);
73void bnx2x_link_report(struct bnx2x *bp); 73void bnx2x_link_report(struct bnx2x *bp);
74 74
75/** 75/**
76 * calculates MF speed according to current linespeed and MF
77 * configuration
78 *
79 * @param bp
80 *
81 * @return u16
82 */
83u16 bnx2x_get_mf_speed(struct bnx2x *bp);
84
85/**
76 * MSI-X slowpath interrupt handler 86 * MSI-X slowpath interrupt handler
77 * 87 *
78 * @param irq 88 * @param irq
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index d02ffbdc9f0..bd94827e5e5 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -45,14 +45,9 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
45 cmd->speed = bp->link_params.req_line_speed[cfg_idx]; 45 cmd->speed = bp->link_params.req_line_speed[cfg_idx];
46 cmd->duplex = bp->link_params.req_duplex[cfg_idx]; 46 cmd->duplex = bp->link_params.req_duplex[cfg_idx];
47 } 47 }
48 if (IS_MF(bp)) {
49 u16 vn_max_rate = ((bp->mf_config[BP_VN(bp)] &
50 FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) *
51 100;
52 48
53 if (vn_max_rate < cmd->speed) 49 if (IS_MF(bp))
54 cmd->speed = vn_max_rate; 50 cmd->speed = bnx2x_get_mf_speed(bp);
55 }
56 51
57 if (bp->port.supported[cfg_idx] & SUPPORTED_TP) 52 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
58 cmd->port = PORT_TP; 53 cmd->port = PORT_TP;
@@ -87,18 +82,57 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
87{ 82{
88 struct bnx2x *bp = netdev_priv(dev); 83 struct bnx2x *bp = netdev_priv(dev);
89 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; 84 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
85 u32 speed;
90 86
91 if (IS_MF(bp)) 87 if (IS_MF_SD(bp))
92 return 0; 88 return 0;
93 89
94 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" 90 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
95 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" 91 " supported 0x%x advertising 0x%x speed %d speed_hi %d\n"
96 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" 92 " duplex %d port %d phy_address %d transceiver %d\n"
97 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", 93 " autoneg %d maxtxpkt %d maxrxpkt %d\n",
98 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, 94 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
95 cmd->speed_hi,
99 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, 96 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
100 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); 97 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
101 98
99 speed = cmd->speed;
100 speed |= (cmd->speed_hi << 16);
101
102 if (IS_MF_SI(bp)) {
103 u32 param = 0;
104 u32 line_speed = bp->link_vars.line_speed;
105
106 /* use 10G if no link detected */
107 if (!line_speed)
108 line_speed = 10000;
109
110 if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
111 BNX2X_DEV_INFO("To set speed BC %X or higher "
112 "is required, please upgrade BC\n",
113 REQ_BC_VER_4_SET_MF_BW);
114 return -EINVAL;
115 }
116 if (line_speed < speed) {
117 BNX2X_DEV_INFO("New speed should be less or equal "
118 "to actual line speed\n");
119 return -EINVAL;
120 }
121 /* load old values */
122 param = bp->mf_config[BP_VN(bp)];
123
124 /* leave only MIN value */
125 param &= FUNC_MF_CFG_MIN_BW_MASK;
126
127 /* set new MAX value */
128 param |= (((speed * 100) / line_speed)
129 << FUNC_MF_CFG_MAX_BW_SHIFT)
130 & FUNC_MF_CFG_MAX_BW_MASK;
131
132 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
133 return 0;
134 }
135
102 cfg_idx = bnx2x_get_link_cfg_idx(bp); 136 cfg_idx = bnx2x_get_link_cfg_idx(bp);
103 old_multi_phy_config = bp->link_params.multi_phy_config; 137 old_multi_phy_config = bp->link_params.multi_phy_config;
104 switch (cmd->port) { 138 switch (cmd->port) {
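In the switch-independent branch added above, the requested ethtool speed is not programmed into the PHY at all; it is converted into a MAX_BW percentage of the current line speed and handed to the management firmware via DRV_MSG_CODE_SET_MF_BW. A hypothetical helper showing just that encoding (the helper name and its parameters are illustrative):

static u32 encode_mf_max_bw_sketch(u32 req_speed, u32 line_speed, u32 old_cfg)
{
	/* keep the configured MIN bandwidth, replace only the MAX field */
	u32 param = old_cfg & FUNC_MF_CFG_MIN_BW_MASK;

	/* e.g. req_speed = 2500, line_speed = 10000 -> MAX_BW field = 25 (%) */
	param |= (((req_speed * 100) / line_speed)
			<< FUNC_MF_CFG_MAX_BW_SHIFT)
			& FUNC_MF_CFG_MAX_BW_MASK;
	return param;
}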
@@ -168,8 +202,6 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
168 202
169 } else { /* forced speed */ 203 } else { /* forced speed */
170 /* advertise the requested speed and duplex if supported */ 204 /* advertise the requested speed and duplex if supported */
171 u32 speed = cmd->speed;
172 speed |= (cmd->speed_hi << 16);
173 switch (speed) { 205 switch (speed) {
174 case SPEED_10: 206 case SPEED_10:
175 if (cmd->duplex == DUPLEX_FULL) { 207 if (cmd->duplex == DUPLEX_FULL) {
@@ -1499,8 +1531,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1499 * updates that have been performed while interrupts were 1531 * updates that have been performed while interrupts were
1500 * disabled. 1532 * disabled.
1501 */ 1533 */
1502 if (bp->common.int_block == INT_BLOCK_IGU) 1534 if (bp->common.int_block == INT_BLOCK_IGU) {
1535 /* Disable local BHs to prevent a deadlock between
1536 * sch_direct_xmit() and bnx2x_run_loopback() (calling
1537 * bnx2x_tx_int()), as both take netif_tx_lock().
1538 */
1539 local_bh_disable();
1503 bnx2x_tx_int(fp_tx); 1540 bnx2x_tx_int(fp_tx);
1541 local_bh_enable();
1542 }
1504 1543
1505 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); 1544 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1506 if (rx_idx != rx_start_idx + num_pkts) 1545 if (rx_idx != rx_start_idx + num_pkts)
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 4cfd4e9b558..6555c477f89 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -434,7 +434,12 @@ struct shared_feat_cfg { /* NVRAM Offset */
434#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000 434#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000
435#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002 435#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002
436 436
437#define SHARED_FEATURE_MF_MODE_DISABLED 0x00000100 437#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
438#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
439#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
440#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
441#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
442#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
438 443
439}; 444};
440 445
@@ -815,6 +820,9 @@ struct drv_func_mb {
815#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 820#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
816#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 821#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
817 822
823#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
824#define REQ_BC_VER_4_SET_MF_BW 0x00060202
825#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
818#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 826#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
819#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 827#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
820#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 828#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
@@ -888,6 +896,7 @@ struct drv_func_mb {
888 896
889 u32 drv_status; 897 u32 drv_status;
890#define DRV_STATUS_PMF 0x00000001 898#define DRV_STATUS_PMF 0x00000001
899#define DRV_STATUS_SET_MF_BW 0x00000004
891 900
892#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 901#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
893#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 902#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
@@ -988,12 +997,43 @@ struct func_mf_cfg {
988 997
989}; 998};
990 999
1000/* This structure is not applicable and should not be accessed on 57711 */
1001struct func_ext_cfg {
1002 u32 func_cfg;
1003#define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
1004#define MACP_FUNC_CFG_FLAGS_SHIFT 0
1005#define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
1006#define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
1007#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
1008#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
1009
1010 u32 iscsi_mac_addr_upper;
1011 u32 iscsi_mac_addr_lower;
1012
1013 u32 fcoe_mac_addr_upper;
1014 u32 fcoe_mac_addr_lower;
1015
1016 u32 fcoe_wwn_port_name_upper;
1017 u32 fcoe_wwn_port_name_lower;
1018
1019 u32 fcoe_wwn_node_name_upper;
1020 u32 fcoe_wwn_node_name_lower;
1021
1022 u32 preserve_data;
1023#define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
1024#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
1025#define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
1026#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
1027#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
1028};
1029
991struct mf_cfg { 1030struct mf_cfg {
992 1031
993 struct shared_mf_cfg shared_mf_config; 1032 struct shared_mf_cfg shared_mf_config;
994 struct port_mf_cfg port_mf_config[PORT_MAX]; 1033 struct port_mf_cfg port_mf_config[PORT_MAX];
995 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; 1034 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX];
996 1035
1036 struct func_ext_cfg func_ext_config[E1H_FUNC_MAX];
997}; 1037};
998 1038
999 1039
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 58091961925..38aeffef2a8 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -3904,7 +3904,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
3904 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 3904 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
3905 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 3905 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
3906 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 3906 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
3907 return 0;; 3907 return 0;
3908 msleep(1); 3908 msleep(1);
3909 } 3909 }
3910 return -EINVAL; 3910 return -EINVAL;
@@ -3988,7 +3988,7 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
3988 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 3988 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
3989 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 3989 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
3990 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 3990 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
3991 return 0;; 3991 return 0;
3992 msleep(1); 3992 msleep(1);
3993 } 3993 }
3994 3994
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 9709b856966..0068a1dbc06 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -2026,13 +2026,28 @@ static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2026 2026
2027static void bnx2x_read_mf_cfg(struct bnx2x *bp) 2027static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2028{ 2028{
2029 int vn; 2029 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2030 2030
2031 if (BP_NOMCP(bp)) 2031 if (BP_NOMCP(bp))
2032 return; /* what should be the default bvalue in this case */ 2032 return; /* what should be the default bvalue in this case */
2033 2033
2034 /* For 2 port configuration the absolute function number formula
2035 * is:
2036 * abs_func = 2 * vn + BP_PORT + BP_PATH
2037 *
2038 * and there are 4 functions per port
2039 *
2040 * For 4 port configuration it is
2041 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2042 *
2043 * and there are 2 functions per port
2044 */
2034 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2045 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2035 int /*abs*/func = 2*vn + BP_PORT(bp); 2046 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2047
2048 if (func >= E1H_FUNC_MAX)
2049 break;
2050
2036 bp->mf_config[vn] = 2051 bp->mf_config[vn] =
2037 MF_CFG_RD(bp, func_mf_config[func].config); 2052 MF_CFG_RD(bp, func_mf_config[func].config);
2038 } 2053 }
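A couple of worked values for the absolute-function formula in the comment above make the 2-port/4-port factor easier to see (the helper is a stand-alone restatement of that comment, not driver code):

static int abs_func_sketch(int is_4_port, int vn, int port, int path)
{
	int n = is_4_port ? 2 : 1;

	/* 2-port: vn = 2, port = 1, path = 0 -> 1 * (2*2 + 1) + 0 = 5 */
	/* 4-port: vn = 1, port = 1, path = 1 -> 2 * (2*1 + 1) + 1 = 7 */
	return n * (2 * vn + port) + path;
}

Either way the result must stay below E1H_FUNC_MAX, which is what the break added above enforces.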
@@ -2248,10 +2263,21 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2248 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; 2263 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2249 u8 unmatched_unicast = 0; 2264 u8 unmatched_unicast = 0;
2250 2265
2266 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2267 unmatched_unicast = 1;
2268
2251 if (filters & BNX2X_PROMISCUOUS_MODE) { 2269 if (filters & BNX2X_PROMISCUOUS_MODE) {
2252 /* promiscuous - accept all, drop none */ 2270 /* promiscuous - accept all, drop none */
2253 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0; 2271 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2254 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1; 2272 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2273 if (IS_MF_SI(bp)) {
2274 /*
2275 * In SI mode, promiscuous mode accepts only
2276 * unmatched packets
2277 */
2278 unmatched_unicast = 1;
2279 accp_all_ucast = 0;
2280 }
2255 } 2281 }
2256 if (filters & BNX2X_ACCEPT_UNICAST) { 2282 if (filters & BNX2X_ACCEPT_UNICAST) {
2257 /* accept matched ucast */ 2283 /* accept matched ucast */
@@ -2260,6 +2286,11 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2260 if (filters & BNX2X_ACCEPT_MULTICAST) { 2286 if (filters & BNX2X_ACCEPT_MULTICAST) {
2261 /* accept matched mcast */ 2287 /* accept matched mcast */
2262 drop_all_mcast = 0; 2288 drop_all_mcast = 0;
2289 if (IS_MF_SI(bp))
2290 /* since mcast addresses won't arrive with ovlan,
2291 * fw needs to accept all of them in
2292 * switch-independent mode */
2293 accp_all_mcast = 1;
2263 } 2294 }
2264 if (filters & BNX2X_ACCEPT_ALL_UNICAST) { 2295 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2265 /* accept all mcast */ 2296 /* accept all mcast */
@@ -2372,7 +2403,7 @@ static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2372 /* calculate queue flags */ 2403 /* calculate queue flags */
2373 flags |= QUEUE_FLG_CACHE_ALIGN; 2404 flags |= QUEUE_FLG_CACHE_ALIGN;
2374 flags |= QUEUE_FLG_HC; 2405 flags |= QUEUE_FLG_HC;
2375 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0; 2406 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
2376 2407
2377 flags |= QUEUE_FLG_VLAN; 2408 flags |= QUEUE_FLG_VLAN;
2378 DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); 2409 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -2573,6 +2604,26 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
2573 */ 2604 */
2574} 2605}
2575 2606
2607/* called due to MCP event (on pmf):
2608 * re-read the new bandwidth configuration
2609 * configure FW
2610 * notify other functions about the change
2611 */
2612static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2613{
2614 if (bp->link_vars.link_up) {
2615 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2616 bnx2x_link_sync_notify(bp);
2617 }
2618 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2619}
2620
2621static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2622{
2623 bnx2x_config_mf_bw(bp);
2624 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2625}
2626
2576static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 2627static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2577{ 2628{
2578 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 2629 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -2598,10 +2649,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2598 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 2649 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2599 } 2650 }
2600 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 2651 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2601 2652 bnx2x_config_mf_bw(bp);
2602 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2603 bnx2x_link_sync_notify(bp);
2604 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2605 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 2653 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2606 } 2654 }
2607 2655
@@ -3022,6 +3070,10 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3022 if (val & DRV_STATUS_DCC_EVENT_MASK) 3070 if (val & DRV_STATUS_DCC_EVENT_MASK)
3023 bnx2x_dcc_event(bp, 3071 bnx2x_dcc_event(bp,
3024 (val & DRV_STATUS_DCC_EVENT_MASK)); 3072 (val & DRV_STATUS_DCC_EVENT_MASK));
3073
3074 if (val & DRV_STATUS_SET_MF_BW)
3075 bnx2x_set_mf_bw(bp);
3076
3025 bnx2x__link_status_update(bp); 3077 bnx2x__link_status_update(bp);
3026 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 3078 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3027 bnx2x_pmf_update(bp); 3079 bnx2x_pmf_update(bp);
@@ -4232,6 +4284,15 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
4232 bp->mf_mode); 4284 bp->mf_mode);
4233 } 4285 }
4234 4286
4287 if (IS_MF_SI(bp))
4288 /*
4289 * In switch-independent mode, the TSTORM needs to accept
4290 * packets that failed classification, since approximate-match
4291 * MAC addresses aren't written to the NIG LLH
4292 */
4293 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4294 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4295
4235 /* Zero this manually as its initialization is 4296 /* Zero this manually as its initialization is
4236 currently missing in the initTool */ 4297 currently missing in the initTool */
4237 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 4298 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -5048,12 +5109,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5048 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5109 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5049#endif 5110#endif
5050 if (!CHIP_IS_E1(bp)) 5111 if (!CHIP_IS_E1(bp))
5051 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp)); 5112 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
5052 5113
5053 if (CHIP_IS_E2(bp)) { 5114 if (CHIP_IS_E2(bp)) {
5054 /* Bit-map indicating which L2 hdrs may appear after the 5115 /* Bit-map indicating which L2 hdrs may appear after the
5055 basic Ethernet header */ 5116 basic Ethernet header */
5056 int has_ovlan = IS_MF(bp); 5117 int has_ovlan = IS_MF_SD(bp);
5057 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); 5118 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5058 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); 5119 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5059 } 5120 }
@@ -5087,7 +5148,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5087 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); 5148 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5088 5149
5089 if (CHIP_IS_E2(bp)) { 5150 if (CHIP_IS_E2(bp)) {
5090 int has_ovlan = IS_MF(bp); 5151 int has_ovlan = IS_MF_SD(bp);
5091 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); 5152 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5092 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); 5153 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5093 } 5154 }
@@ -5164,12 +5225,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5164 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); 5225 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5165 if (!CHIP_IS_E1(bp)) { 5226 if (!CHIP_IS_E1(bp)) {
5166 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); 5227 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5167 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp)); 5228 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
5168 } 5229 }
5169 if (CHIP_IS_E2(bp)) { 5230 if (CHIP_IS_E2(bp)) {
5170 /* Bit-map indicating which L2 hdrs may appear after the 5231 /* Bit-map indicating which L2 hdrs may appear after the
5171 basic Ethernet header */ 5232 basic Ethernet header */
5172 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6)); 5233 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
5173 } 5234 }
5174 5235
5175 if (CHIP_REV_IS_SLOW(bp)) 5236 if (CHIP_REV_IS_SLOW(bp))
@@ -5386,7 +5447,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
5386 if (!CHIP_IS_E1(bp)) { 5447 if (!CHIP_IS_E1(bp)) {
5387 /* 0x2 disable mf_ov, 0x1 enable */ 5448 /* 0x2 disable mf_ov, 0x1 enable */
5388 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 5449 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5389 (IS_MF(bp) ? 0x1 : 0x2)); 5450 (IS_MF_SD(bp) ? 0x1 : 0x2));
5390 5451
5391 if (CHIP_IS_E2(bp)) { 5452 if (CHIP_IS_E2(bp)) {
5392 val = 0; 5453 val = 0;
@@ -6170,6 +6231,70 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6170 return BP_VN(bp) * 32 + rel_offset; 6231 return BP_VN(bp) * 32 + rel_offset;
6171} 6232}
6172 6233
6234/**
6235 * LLH CAM line allocations: currently only iSCSI and ETH MACs are
6236 * relevant. In addition, the current implementation is tuned for a
6237 * single ETH MAC.
6238 *
6239 * When a PF configuration with multiple unicast ETH MACs in
6240 * switch-independent mode is required (NetQ, multiple netdev MACs,
6241 * etc.), consider making better use of the 16 per-function MAC
6242 * entries in the LLH memory.
6243 */
6244enum {
6245 LLH_CAM_ISCSI_ETH_LINE = 0,
6246 LLH_CAM_ETH_LINE,
6247 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6248};
6249
6250static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6251 int set,
6252 unsigned char *dev_addr,
6253 int index)
6254{
6255 u32 wb_data[2];
6256 u32 mem_offset, ena_offset, mem_index;
6257 /**
6258 * index mapping:
6259 * 0..7 - go to MEM
6260 * 8..15 - go to MEM2
6261 */
6262
6263 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6264 return;
6265
6266 /* calculate memory start offset according to the mapping
6267 * and index in the memory */
6268 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6269 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6270 NIG_REG_LLH0_FUNC_MEM;
6271 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6272 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6273 mem_index = index;
6274 } else {
6275 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6276 NIG_REG_P0_LLH_FUNC_MEM2;
6277 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6278 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6279 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6280 }
6281
6282 if (set) {
6283 /* LLH_FUNC_MEM is a u64 WB register */
6284 mem_offset += 8*mem_index;
6285
6286 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6287 (dev_addr[4] << 8) | dev_addr[5]);
6288 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
6289
6290 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6291 }
6292
6293 /* enable/disable the entry */
6294 REG_WR(bp, ena_offset + 4*mem_index, set);
6295
6296}
6297
6173void bnx2x_set_eth_mac(struct bnx2x *bp, int set) 6298void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6174{ 6299{
6175 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) : 6300 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
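bnx2x_set_mac_in_nig() above splits the six MAC bytes across the two 32-bit words of the write-back pair before programming the per-function LLH memory. A worked example of that packing (the address itself is illustrative):

/* dev_addr = 00:1a:2b:3c:4d:5e
 *   wb_data[0] = (0x2b << 24) | (0x3c << 16) | (0x4d << 8) | 0x5e = 0x2b3c4d5e
 *   wb_data[1] = (0x00 << 8) | 0x1a                               = 0x0000001a
 * i.e. the lower four MAC bytes land in the first word and the upper two
 * in the low 16 bits of the second word.
 */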
@@ -6179,6 +6304,8 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6179 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr, 6304 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6180 (1 << bp->fp->cl_id), cam_offset , 0); 6305 (1 << bp->fp->cl_id), cam_offset , 0);
6181 6306
6307 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6308
6182 if (CHIP_IS_E1(bp)) { 6309 if (CHIP_IS_E1(bp)) {
6183 /* broadcast MAC */ 6310 /* broadcast MAC */
6184 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 6311 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -6289,6 +6416,8 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6289 /* Send a SET_MAC ramrod */ 6416 /* Send a SET_MAC ramrod */
6290 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec, 6417 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6291 cam_offset, 0); 6418 cam_offset, 0);
6419
6420 bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6292 return 0; 6421 return 0;
6293} 6422}
6294#endif 6423#endif
@@ -8076,9 +8205,8 @@ static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8076static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) 8205static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8077{ 8206{
8078 int port = BP_PORT(bp); 8207 int port = BP_PORT(bp);
8079 u32 val, val2;
8080 u32 config; 8208 u32 config;
8081 u32 ext_phy_type, ext_phy_config;; 8209 u32 ext_phy_type, ext_phy_config;
8082 8210
8083 bp->link_params.bp = bp; 8211 bp->link_params.bp = bp;
8084 bp->link_params.port = port; 8212 bp->link_params.port = port;
@@ -8135,25 +8263,62 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8135 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 8263 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8136 bp->mdio.prtad = 8264 bp->mdio.prtad =
8137 XGXS_EXT_PHY_ADDR(ext_phy_config); 8265 XGXS_EXT_PHY_ADDR(ext_phy_config);
8266}
8138 8267
8139 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 8268static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8140 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 8269{
8141 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 8270 u32 val, val2;
8142 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 8271 int func = BP_ABS_FUNC(bp);
8143 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 8272 int port = BP_PORT(bp);
8273
8274 if (BP_NOMCP(bp)) {
8275 BNX2X_ERROR("warning: random MAC workaround active\n");
8276 random_ether_addr(bp->dev->dev_addr);
8277 } else if (IS_MF(bp)) {
8278 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8279 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8280 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8281 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8282 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8283
8284#ifdef BCM_CNIC
8285 /* iSCSI NPAR MAC */
8286 if (IS_MF_SI(bp)) {
8287 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8288 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8289 val2 = MF_CFG_RD(bp, func_ext_config[func].
8290 iscsi_mac_addr_upper);
8291 val = MF_CFG_RD(bp, func_ext_config[func].
8292 iscsi_mac_addr_lower);
8293 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8294 }
8295 }
8296#endif
8297 } else {
8298 /* in SF read MACs from port configuration */
8299 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8300 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8301 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8144 8302
8145#ifdef BCM_CNIC 8303#ifdef BCM_CNIC
8146 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper); 8304 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8147 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower); 8305 iscsi_mac_upper);
8148 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8306 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8307 iscsi_mac_lower);
8308 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8149#endif 8309#endif
8310 }
8311
8312 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8313 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8314
8150} 8315}
8151 8316
8152static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 8317static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8153{ 8318{
8154 int func = BP_ABS_FUNC(bp); 8319 int /*abs*/func = BP_ABS_FUNC(bp);
8155 int vn; 8320 int vn, port;
8156 u32 val, val2; 8321 u32 val = 0;
8157 int rc = 0; 8322 int rc = 0;
8158 8323
8159 bnx2x_get_common_hwinfo(bp); 8324 bnx2x_get_common_hwinfo(bp);
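bnx2x_get_mac_hwinfo() above rebuilds the station address from the mac_upper/mac_lower pair via bnx2x_set_mac_buf(); the byte order matches the open-coded assignment removed further down in this patch. A worked example (values illustrative):

/* mac_upper = 0x001a, mac_lower = 0x2b3c4d5e
 *   dev_addr[0..1] = 00:1a       (from the upper 16-bit word)
 *   dev_addr[2..5] = 2b:3c:4d:5e (from the lower 32-bit word)
 *   -> 00:1a:2b:3c:4d:5e
 */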
@@ -8186,44 +8351,99 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8186 bp->mf_ov = 0; 8351 bp->mf_ov = 0;
8187 bp->mf_mode = 0; 8352 bp->mf_mode = 0;
8188 vn = BP_E1HVN(bp); 8353 vn = BP_E1HVN(bp);
8354 port = BP_PORT(bp);
8355
8189 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 8356 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8357 DP(NETIF_MSG_PROBE,
8358 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8359 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8360 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
8190 if (SHMEM2_HAS(bp, mf_cfg_addr)) 8361 if (SHMEM2_HAS(bp, mf_cfg_addr))
8191 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); 8362 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8192 else 8363 else
8193 bp->common.mf_cfg_base = bp->common.shmem_base + 8364 bp->common.mf_cfg_base = bp->common.shmem_base +
8194 offsetof(struct shmem_region, func_mb) + 8365 offsetof(struct shmem_region, func_mb) +
8195 E1H_FUNC_MAX * sizeof(struct drv_func_mb); 8366 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8196 bp->mf_config[vn] = 8367 /*
8197 MF_CFG_RD(bp, func_mf_config[func].config); 8368 * get mf configuration:
8369 * 1. existence of MF configuration
8370 * 2. MAC address must be legal (check only upper bytes)
8371 * for Switch-Independent mode;
8372 * OVLAN must be legal for Switch-Dependent mode
8373 * 3. SF_MODE configures specific MF mode
8374 */
8375 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8376 /* get mf configuration */
8377 val = SHMEM_RD(bp,
8378 dev_info.shared_feature_config.config);
8379 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8380
8381 switch (val) {
8382 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8383 val = MF_CFG_RD(bp, func_mf_config[func].
8384 mac_upper);
8385 /* check for legal mac (upper bytes)*/
8386 if (val != 0xffff) {
8387 bp->mf_mode = MULTI_FUNCTION_SI;
8388 bp->mf_config[vn] = MF_CFG_RD(bp,
8389 func_mf_config[func].config);
8390 } else
8391 DP(NETIF_MSG_PROBE, "illegal MAC "
8392 "address for SI\n");
8393 break;
8394 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8395 /* get OV configuration */
8396 val = MF_CFG_RD(bp,
8397 func_mf_config[FUNC_0].e1hov_tag);
8398 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8399
8400 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8401 bp->mf_mode = MULTI_FUNCTION_SD;
8402 bp->mf_config[vn] = MF_CFG_RD(bp,
8403 func_mf_config[func].config);
8404 } else
8405 DP(NETIF_MSG_PROBE, "illegal OV for "
8406 "SD\n");
8407 break;
8408 default:
8409 /* Unknown configuration: reset mf_config */
8410 bp->mf_config[vn] = 0;
8411 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
8412 val);
8413 }
8414 }
8198 8415
8199 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
8200 FUNC_MF_CFG_E1HOV_TAG_MASK);
8201 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8202 bp->mf_mode = 1;
8203 BNX2X_DEV_INFO("%s function mode\n", 8416 BNX2X_DEV_INFO("%s function mode\n",
8204 IS_MF(bp) ? "multi" : "single"); 8417 IS_MF(bp) ? "multi" : "single");
8205 8418
8206 if (IS_MF(bp)) { 8419 switch (bp->mf_mode) {
8207 val = (MF_CFG_RD(bp, func_mf_config[func]. 8420 case MULTI_FUNCTION_SD:
8208 e1hov_tag) & 8421 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8209 FUNC_MF_CFG_E1HOV_TAG_MASK); 8422 FUNC_MF_CFG_E1HOV_TAG_MASK;
8210 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 8423 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8211 bp->mf_ov = val; 8424 bp->mf_ov = val;
8212 BNX2X_DEV_INFO("MF OV for func %d is %d " 8425 BNX2X_DEV_INFO("MF OV for func %d is %d"
8213 "(0x%04x)\n", 8426 " (0x%04x)\n", func,
8214 func, bp->mf_ov, bp->mf_ov); 8427 bp->mf_ov, bp->mf_ov);
8215 } else { 8428 } else {
8216 BNX2X_ERROR("No valid MF OV for func %d," 8429 BNX2X_ERR("No valid MF OV for func %d,"
8217 " aborting\n", func); 8430 " aborting\n", func);
8218 rc = -EPERM; 8431 rc = -EPERM;
8219 } 8432 }
8220 } else { 8433 break;
8221 if (BP_VN(bp)) { 8434 case MULTI_FUNCTION_SI:
8222 BNX2X_ERROR("VN %d in single function mode," 8435 BNX2X_DEV_INFO("func %d is in MF "
8223 " aborting\n", BP_E1HVN(bp)); 8436 "switch-independent mode\n", func);
8437 break;
8438 default:
8439 if (vn) {
8440 BNX2X_ERR("VN %d in single function mode,"
8441 " aborting\n", vn);
8224 rc = -EPERM; 8442 rc = -EPERM;
8225 } 8443 }
8444 break;
8226 } 8445 }
8446
8227 } 8447 }
8228 8448
8229 /* adjust igu_sb_cnt to MF for E1x */ 8449 /* adjust igu_sb_cnt to MF for E1x */
@@ -8248,32 +8468,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8248 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 8468 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8249 } 8469 }
8250 8470
8251 if (IS_MF(bp)) { 8471 /* Get MAC addresses */
8252 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 8472 bnx2x_get_mac_hwinfo(bp);
8253 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8254 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8255 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8256 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8257 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8258 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8259 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8260 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8261 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8262 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8263 ETH_ALEN);
8264 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8265 ETH_ALEN);
8266 }
8267
8268 return rc;
8269 }
8270
8271 if (BP_NOMCP(bp)) {
8272 /* only supposed to happen on emulation/FPGA */
8273 BNX2X_ERROR("warning: random MAC workaround active\n");
8274 random_ether_addr(bp->dev->dev_addr);
8275 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8276 }
8277 8473
8278 return rc; 8474 return rc;
8279} 8475}
@@ -8761,7 +8957,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8761 dev->netdev_ops = &bnx2x_netdev_ops; 8957 dev->netdev_ops = &bnx2x_netdev_ops;
8762 bnx2x_set_ethtool_ops(dev); 8958 bnx2x_set_ethtool_ops(dev);
8763 dev->features |= NETIF_F_SG; 8959 dev->features |= NETIF_F_SG;
8764 dev->features |= NETIF_F_HW_CSUM; 8960 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
8765 if (bp->flags & USING_DAC_FLAG) 8961 if (bp->flags & USING_DAC_FLAG)
8766 dev->features |= NETIF_F_HIGHDMA; 8962 dev->features |= NETIF_F_HIGHDMA;
8767 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 8963 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
@@ -8769,7 +8965,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8769 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 8965 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8770 8966
8771 dev->vlan_features |= NETIF_F_SG; 8967 dev->vlan_features |= NETIF_F_SG;
8772 dev->vlan_features |= NETIF_F_HW_CSUM; 8968 dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
8773 if (bp->flags & USING_DAC_FLAG) 8969 if (bp->flags & USING_DAC_FLAG)
8774 dev->vlan_features |= NETIF_F_HIGHDMA; 8970 dev->vlan_features |= NETIF_F_HIGHDMA;
8775 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 8971 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
@@ -9096,12 +9292,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9096 /* calc qm_cid_count */ 9292 /* calc qm_cid_count */
9097 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count); 9293 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9098 9294
9099 rc = register_netdev(dev);
9100 if (rc) {
9101 dev_err(&pdev->dev, "Cannot register net device\n");
9102 goto init_one_exit;
9103 }
9104
9105 /* Configure interupt mode: try to enable MSI-X/MSI if 9295 /* Configure interupt mode: try to enable MSI-X/MSI if
9106 * needed, set bp->num_queues appropriately. 9296 * needed, set bp->num_queues appropriately.
9107 */ 9297 */
@@ -9110,6 +9300,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9110 /* Add all NAPI objects */ 9300 /* Add all NAPI objects */
9111 bnx2x_add_all_napi(bp); 9301 bnx2x_add_all_napi(bp);
9112 9302
9303 rc = register_netdev(dev);
9304 if (rc) {
9305 dev_err(&pdev->dev, "Cannot register net device\n");
9306 goto init_one_exit;
9307 }
9308
9113 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 9309 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9114 9310
9115 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," 9311 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 1cefe489a95..64bdda189e5 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1774,6 +1774,8 @@
1774/* [RW 8] event id for llh0 */ 1774/* [RW 8] event id for llh0 */
1775#define NIG_REG_LLH0_EVENT_ID 0x10084 1775#define NIG_REG_LLH0_EVENT_ID 0x10084
1776#define NIG_REG_LLH0_FUNC_EN 0x160fc 1776#define NIG_REG_LLH0_FUNC_EN 0x160fc
1777#define NIG_REG_LLH0_FUNC_MEM 0x16180
1778#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
1777#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100 1779#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
1778/* [RW 1] Determine the IP version to look for in 1780/* [RW 1] Determine the IP version to look for in
1779 ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */ 1781 ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
@@ -1797,6 +1799,9 @@
1797#define NIG_REG_LLH1_ERROR_MASK 0x10090 1799#define NIG_REG_LLH1_ERROR_MASK 0x10090
1798/* [RW 8] event id for llh1 */ 1800/* [RW 8] event id for llh1 */
1799#define NIG_REG_LLH1_EVENT_ID 0x10088 1801#define NIG_REG_LLH1_EVENT_ID 0x10088
1802#define NIG_REG_LLH1_FUNC_MEM 0x161c0
1803#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
1804#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
1800/* [RW 8] init credit counter for port1 in LLH */ 1805/* [RW 8] init credit counter for port1 in LLH */
1801#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 1806#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
1802#define NIG_REG_LLH1_XCM_MASK 0x10134 1807#define NIG_REG_LLH1_XCM_MASK 0x10134
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 881914bc4e9..48cf24ff4e6 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2474,8 +2474,7 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2474 goto out; 2474 goto out;
2475 2475
2476 read_lock(&bond->lock); 2476 read_lock(&bond->lock);
2477 slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), 2477 slave = bond_get_slave_by_dev(netdev_priv(dev), orig_dev);
2478 orig_dev);
2479 if (!slave) 2478 if (!slave)
2480 goto out_unlock; 2479 goto out_unlock;
2481 2480
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2fee00a4c9e..bb33b3b347f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -873,17 +873,11 @@ static void bond_mc_del(struct bonding *bond, void *addr)
873static void __bond_resend_igmp_join_requests(struct net_device *dev) 873static void __bond_resend_igmp_join_requests(struct net_device *dev)
874{ 874{
875 struct in_device *in_dev; 875 struct in_device *in_dev;
876 struct ip_mc_list *im;
877 876
878 rcu_read_lock(); 877 rcu_read_lock();
879 in_dev = __in_dev_get_rcu(dev); 878 in_dev = __in_dev_get_rcu(dev);
880 if (in_dev) { 879 if (in_dev)
881 read_lock(&in_dev->mc_list_lock); 880 ip_mc_rejoin_groups(in_dev);
882 for (im = in_dev->mc_list; im; im = im->next)
883 ip_mc_rejoin_group(im);
884 read_unlock(&in_dev->mc_list_lock);
885 }
886
887 rcu_read_unlock(); 881 rcu_read_unlock();
888} 882}
889 883
@@ -3211,7 +3205,7 @@ out:
3211#ifdef CONFIG_PROC_FS 3205#ifdef CONFIG_PROC_FS
3212 3206
3213static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) 3207static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
3214 __acquires(&dev_base_lock) 3208 __acquires(RCU)
3215 __acquires(&bond->lock) 3209 __acquires(&bond->lock)
3216{ 3210{
3217 struct bonding *bond = seq->private; 3211 struct bonding *bond = seq->private;
@@ -3220,7 +3214,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
3220 int i; 3214 int i;
3221 3215
3222 /* make sure the bond won't be taken away */ 3216 /* make sure the bond won't be taken away */
3223 read_lock(&dev_base_lock); 3217 rcu_read_lock();
3224 read_lock(&bond->lock); 3218 read_lock(&bond->lock);
3225 3219
3226 if (*pos == 0) 3220 if (*pos == 0)
@@ -3250,12 +3244,12 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3250 3244
3251static void bond_info_seq_stop(struct seq_file *seq, void *v) 3245static void bond_info_seq_stop(struct seq_file *seq, void *v)
3252 __releases(&bond->lock) 3246 __releases(&bond->lock)
3253 __releases(&dev_base_lock) 3247 __releases(RCU)
3254{ 3248{
3255 struct bonding *bond = seq->private; 3249 struct bonding *bond = seq->private;
3256 3250
3257 read_unlock(&bond->lock); 3251 read_unlock(&bond->lock);
3258 read_unlock(&dev_base_lock); 3252 rcu_read_unlock();
3259} 3253}
3260 3254
3261static void bond_info_show_master(struct seq_file *seq) 3255static void bond_info_show_master(struct seq_file *seq)
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4eedb12df6c..ad3ae46a4c0 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -286,7 +286,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
286 return NULL; 286 return NULL;
287 } 287 }
288 288
289 return (struct bonding *)netdev_priv(slave->dev->master); 289 return netdev_priv(slave->dev->master);
290} 290}
291 291
292static inline bool bond_is_lb(const struct bonding *bond) 292static inline bool bond_is_lb(const struct bonding *bond)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 080574b0fff..d5a9db60ade 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -12,6 +12,27 @@ config CAN_VCAN
12 This driver can also be built as a module. If so, the module 12 This driver can also be built as a module. If so, the module
13 will be called vcan. 13 will be called vcan.
14 14
15config CAN_SLCAN
16 tristate "Serial / USB serial CAN Adaptors (slcan)"
17 depends on CAN
18 default N
19 ---help---
20 CAN driver for several 'low cost' CAN interfaces that are attached
21 via serial lines or via USB-to-serial adapters using the LAWICEL
22 ASCII protocol. The driver implements the tty line discipline N_SLCAN.
23
24 As only the sending and receiving of CAN frames is implemented, this
25 driver should work with the (serial/USB) CAN hardware from:
26 www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
27
28 Userspace tools to attach the SLCAN line discipline (slcan_attach,
29 slcand) can be found in the can-utils at the SocketCAN SVN, see
30 http://developer.berlios.de/projects/socketcan for details.
31
32 The slcan driver supports up to 10 CAN netdevices by default which
33 can be changed by the 'maxdev=xx' module option. This driver can
34 also be built as a module. If so, the module will be called slcan.
35
15config CAN_DEV 36config CAN_DEV
16 tristate "Platform CAN drivers with Netlink support" 37 tristate "Platform CAN drivers with Netlink support"
17 depends on CAN 38 depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 90af15a4f10..07ca159ba3f 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_CAN_VCAN) += vcan.o 5obj-$(CONFIG_CAN_VCAN) += vcan.o
6obj-$(CONFIG_CAN_SLCAN) += slcan.o
6 7
7obj-$(CONFIG_CAN_DEV) += can-dev.o 8obj-$(CONFIG_CAN_DEV) += can-dev.o
8can-dev-y := dev.o 9can-dev-y := dev.o
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 64c378cd0c3..74cd880c7e0 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -182,7 +182,7 @@ static int mscan_restart(struct net_device *dev)
182 182
183 priv->can.state = CAN_STATE_ERROR_ACTIVE; 183 priv->can.state = CAN_STATE_ERROR_ACTIVE;
184 WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD), 184 WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
185 "bus-off state expected"); 185 "bus-off state expected\n");
186 out_8(&regs->canmisc, MSCAN_BOHOLD); 186 out_8(&regs->canmisc, MSCAN_BOHOLD);
187 /* Re-enable receive interrupts. */ 187 /* Re-enable receive interrupts. */
188 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE); 188 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 672718261c6..a9b6a6525a6 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -32,99 +32,91 @@
32#include <linux/can/dev.h> 32#include <linux/can/dev.h>
33#include <linux/can/error.h> 33#include <linux/can/error.h>
34 34
35#define MAX_MSG_OBJ 32 35#define PCH_ENABLE 1 /* The enable flag */
36#define MSG_OBJ_RX 0 /* The receive message object flag. */ 36#define PCH_DISABLE 0 /* The disable flag */
37#define MSG_OBJ_TX 1 /* The transmit message object flag. */ 37#define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */
38 38#define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */
39#define ENABLE 1 /* The enable flag */ 39#define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1))
40#define DISABLE 0 /* The disable flag */ 40#define PCH_CTRL_CCE BIT(6)
41#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */ 41#define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */
42#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */ 42#define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */
43#define CAN_CTRL_IE_SIE_EIE 0x000e 43#define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. */
44#define CAN_CTRL_CCE 0x0040 44
45#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */ 45#define PCH_CMASK_RX_TX_SET 0x00f3
46#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */ 46#define PCH_CMASK_RX_TX_GET 0x0073
47#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */ 47#define PCH_CMASK_ALL 0xff
48#define CAN_CMASK_RX_TX_SET 0x00f3 48#define PCH_CMASK_NEWDAT BIT(2)
49#define CAN_CMASK_RX_TX_GET 0x0073 49#define PCH_CMASK_CLRINTPND BIT(3)
50#define CAN_CMASK_ALL 0xff 50#define PCH_CMASK_CTRL BIT(4)
51#define CAN_CMASK_RDWR 0x80 51#define PCH_CMASK_ARB BIT(5)
52#define CAN_CMASK_ARB 0x20 52#define PCH_CMASK_MASK BIT(6)
53#define CAN_CMASK_CTRL 0x10 53#define PCH_CMASK_RDWR BIT(7)
54#define CAN_CMASK_MASK 0x40 54#define PCH_IF_MCONT_NEWDAT BIT(15)
55#define CAN_CMASK_NEWDAT 0x04 55#define PCH_IF_MCONT_MSGLOST BIT(14)
56#define CAN_CMASK_CLRINTPND 0x08 56#define PCH_IF_MCONT_INTPND BIT(13)
57 57#define PCH_IF_MCONT_UMASK BIT(12)
58#define CAN_IF_MCONT_NEWDAT 0x8000 58#define PCH_IF_MCONT_TXIE BIT(11)
59#define CAN_IF_MCONT_INTPND 0x2000 59#define PCH_IF_MCONT_RXIE BIT(10)
60#define CAN_IF_MCONT_UMASK 0x1000 60#define PCH_IF_MCONT_RMTEN BIT(9)
61#define CAN_IF_MCONT_TXIE 0x0800 61#define PCH_IF_MCONT_TXRQXT BIT(8)
62#define CAN_IF_MCONT_RXIE 0x0400 62#define PCH_IF_MCONT_EOB BIT(7)
63#define CAN_IF_MCONT_RMTEN 0x0200 63#define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3))
64#define CAN_IF_MCONT_TXRQXT 0x0100 64#define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15))
65#define CAN_IF_MCONT_EOB 0x0080 65#define PCH_ID2_DIR BIT(13)
66#define CAN_IF_MCONT_DLC 0x000f 66#define PCH_ID2_XTD BIT(14)
67#define CAN_IF_MCONT_MSGLOST 0x4000 67#define PCH_ID_MSGVAL BIT(15)
68#define CAN_MASK2_MDIR_MXTD 0xc000 68#define PCH_IF_CREQ_BUSY BIT(15)
69#define CAN_ID2_DIR 0x2000 69
70#define CAN_ID_MSGVAL 0x8000 70#define PCH_STATUS_INT 0x8000
71 71#define PCH_REC 0x00007f00
72#define CAN_STATUS_INT 0x8000 72#define PCH_TEC 0x000000ff
73#define CAN_IF_CREQ_BUSY 0x8000 73
74#define CAN_ID2_XTD 0x4000 74#define PCH_TX_OK BIT(3)
75 75#define PCH_RX_OK BIT(4)
76#define CAN_REC 0x00007f00 76#define PCH_EPASSIV BIT(5)
77#define CAN_TEC 0x000000ff 77#define PCH_EWARN BIT(6)
78 78#define PCH_BUS_OFF BIT(7)
79#define PCH_RX_OK 0x00000010
80#define PCH_TX_OK 0x00000008
81#define PCH_BUS_OFF 0x00000080
82#define PCH_EWARN 0x00000040
83#define PCH_EPASSIV 0x00000020
84#define PCH_LEC0 0x00000001
85#define PCH_LEC1 0x00000002
86#define PCH_LEC2 0x00000004
87#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
88#define PCH_STUF_ERR PCH_LEC0
89#define PCH_FORM_ERR PCH_LEC1
90#define PCH_ACK_ERR (PCH_LEC0 | PCH_LEC1)
91#define PCH_BIT1_ERR PCH_LEC2
92#define PCH_BIT0_ERR (PCH_LEC0 | PCH_LEC2)
93#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
94 79
95/* bit position of certain controller bits. */ 80/* bit position of certain controller bits. */
96#define BIT_BITT_BRP 0 81#define PCH_BIT_BRP 0
97#define BIT_BITT_SJW 6 82#define PCH_BIT_SJW 6
98#define BIT_BITT_TSEG1 8 83#define PCH_BIT_TSEG1 8
99#define BIT_BITT_TSEG2 12 84#define PCH_BIT_TSEG2 12
100#define BIT_IF1_MCONT_RXIE 10 85#define PCH_BIT_BRPE_BRPE 6
101#define BIT_IF2_MCONT_TXIE 11 86#define PCH_MSK_BITT_BRP 0x3f
102#define BIT_BRPE_BRPE 6 87#define PCH_MSK_BRPE_BRPE 0x3c0
103#define BIT_ES_TXERRCNT 0 88#define PCH_MSK_CTRL_IE_SIE_EIE 0x07
104#define BIT_ES_RXERRCNT 8 89#define PCH_COUNTER_LIMIT 10
105#define MSK_BITT_BRP 0x3f
106#define MSK_BITT_SJW 0xc0
107#define MSK_BITT_TSEG1 0xf00
108#define MSK_BITT_TSEG2 0x7000
109#define MSK_BRPE_BRPE 0x3c0
110#define MSK_BRPE_GET 0x0f
111#define MSK_CTRL_IE_SIE_EIE 0x07
112#define MSK_MCONT_TXIE 0x08
113#define MSK_MCONT_RXIE 0x10
114#define PCH_CAN_NO_TX_BUFF 1
115#define COUNTER_LIMIT 10
116 90
117#define PCH_CAN_CLK 50000000 /* 50MHz */ 91#define PCH_CAN_CLK 50000000 /* 50MHz */
118 92
119/* Define the number of message objects. 93/* Define the number of message objects.
120 * PCH CAN communications are done via Message RAM. 94 * PCH CAN communications are done via Message RAM.
121 * The Message RAM consists of 32 message objects. */ 95 * The Message RAM consists of 32 message objects. */
122#define PCH_RX_OBJ_NUM 26 /* 1~ PCH_RX_OBJ_NUM is Rx*/ 96#define PCH_RX_OBJ_NUM 26
123#define PCH_TX_OBJ_NUM 6 /* PCH_RX_OBJ_NUM is RX ~ Tx*/ 97#define PCH_TX_OBJ_NUM 6
124#define PCH_OBJ_NUM (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM) 98#define PCH_RX_OBJ_START 1
99#define PCH_RX_OBJ_END PCH_RX_OBJ_NUM
100#define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1)
101#define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)
125 102
126#define PCH_FIFO_THRESH 16 103#define PCH_FIFO_THRESH 16
127 104
105enum pch_ifreg {
106 PCH_RX_IFREG,
107 PCH_TX_IFREG,
108};
109
110enum pch_can_err {
111 PCH_STUF_ERR = 1,
112 PCH_FORM_ERR,
113 PCH_ACK_ERR,
114 PCH_BIT1_ERR,
115 PCH_BIT0_ERR,
116 PCH_CRC_ERR,
117 PCH_LEC_ALL,
118};
119
128enum pch_can_mode { 120enum pch_can_mode {
129 PCH_CAN_ENABLE, 121 PCH_CAN_ENABLE,
130 PCH_CAN_DISABLE, 122 PCH_CAN_DISABLE,
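The macro rework at the top of this hunk mostly renames the constants and rewrites them with BIT(); spot-checking a few of the kept masks against the values they replace shows the register layout is unchanged (BIT(n) expands to 1UL << n):

/* PCH_CTRL_IE_SIE_EIE = BIT(3) | BIT(2) | BIT(1)         = 0x000e (was CAN_CTRL_IE_SIE_EIE)
 * PCH_IF_MCONT_NEWDAT = BIT(15)                           = 0x8000 (was CAN_IF_MCONT_NEWDAT)
 * PCH_IF_MCONT_DLC    = BIT(3) | BIT(2) | BIT(1) | BIT(0) = 0x000f (was CAN_IF_MCONT_DLC)
 */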
@@ -134,6 +126,21 @@ enum pch_can_mode {
134 PCH_CAN_RUN 126 PCH_CAN_RUN
135}; 127};
136 128
129struct pch_can_if_regs {
130 u32 creq;
131 u32 cmask;
132 u32 mask1;
133 u32 mask2;
134 u32 id1;
135 u32 id2;
136 u32 mcont;
137 u32 dataa1;
138 u32 dataa2;
139 u32 datab1;
140 u32 datab2;
141 u32 rsv[13];
142};
143
137struct pch_can_regs { 144struct pch_can_regs {
138 u32 cont; 145 u32 cont;
139 u32 stat; 146 u32 stat;
@@ -142,38 +149,21 @@ struct pch_can_regs {
142 u32 intr; 149 u32 intr;
143 u32 opt; 150 u32 opt;
144 u32 brpe; 151 u32 brpe;
145 u32 reserve1; 152 u32 reserve;
146 u32 if1_creq; 153 struct pch_can_if_regs ifregs[2]; /* [0]=if1 [1]=if2 */
147 u32 if1_cmask; 154 u32 reserve1[8];
148 u32 if1_mask1;
149 u32 if1_mask2;
150 u32 if1_id1;
151 u32 if1_id2;
152 u32 if1_mcont;
153 u32 if1_dataa1;
154 u32 if1_dataa2;
155 u32 if1_datab1;
156 u32 if1_datab2;
157 u32 reserve2;
158 u32 reserve3[12];
159 u32 if2_creq;
160 u32 if2_cmask;
161 u32 if2_mask1;
162 u32 if2_mask2;
163 u32 if2_id1;
164 u32 if2_id2;
165 u32 if2_mcont;
166 u32 if2_dataa1;
167 u32 if2_dataa2;
168 u32 if2_datab1;
169 u32 if2_datab2;
170 u32 reserve4;
171 u32 reserve5[20];
172 u32 treq1; 155 u32 treq1;
173 u32 treq2; 156 u32 treq2;
174 u32 reserve6[2]; 157 u32 reserve2[6];
175 u32 reserve7[56]; 158 u32 data1;
176 u32 reserve8[3]; 159 u32 data2;
160 u32 reserve3[6];
161 u32 canipend1;
162 u32 canipend2;
163 u32 reserve4[6];
164 u32 canmval1;
165 u32 canmval2;
166 u32 reserve5[37];
177 u32 srst; 167 u32 srst;
178}; 168};
179 169
@@ -181,14 +171,13 @@ struct pch_can_priv {
181 struct can_priv can; 171 struct can_priv can;
182 unsigned int can_num; 172 unsigned int can_num;
183 struct pci_dev *dev; 173 struct pci_dev *dev;
184 unsigned int tx_enable[MAX_MSG_OBJ]; 174 int tx_enable[PCH_TX_OBJ_END];
185 unsigned int rx_enable[MAX_MSG_OBJ]; 175 int rx_enable[PCH_TX_OBJ_END];
186 unsigned int rx_link[MAX_MSG_OBJ]; 176 int rx_link[PCH_TX_OBJ_END];
187 unsigned int int_enables; 177 unsigned int int_enables;
188 unsigned int int_stat; 178 unsigned int int_stat;
189 struct net_device *ndev; 179 struct net_device *ndev;
190 spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/ 180 unsigned int msg_obj[PCH_TX_OBJ_END];
191 unsigned int msg_obj[MAX_MSG_OBJ];
192 struct pch_can_regs __iomem *regs; 181 struct pch_can_regs __iomem *regs;
193 struct napi_struct napi; 182 struct napi_struct napi;
194 unsigned int tx_obj; /* Point next Tx Obj index */ 183 unsigned int tx_obj; /* Point next Tx Obj index */
@@ -228,11 +217,11 @@ static void pch_can_set_run_mode(struct pch_can_priv *priv,
228{ 217{
229 switch (mode) { 218 switch (mode) {
230 case PCH_CAN_RUN: 219 case PCH_CAN_RUN:
231 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT); 220 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
232 break; 221 break;
233 222
234 case PCH_CAN_STOP: 223 case PCH_CAN_STOP:
235 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT); 224 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
236 break; 225 break;
237 226
238 default: 227 default:
@@ -246,30 +235,30 @@ static void pch_can_set_optmode(struct pch_can_priv *priv)
246 u32 reg_val = ioread32(&priv->regs->opt); 235 u32 reg_val = ioread32(&priv->regs->opt);
247 236
248 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 237 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
249 reg_val |= CAN_OPT_SILENT; 238 reg_val |= PCH_OPT_SILENT;
250 239
251 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) 240 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
252 reg_val |= CAN_OPT_LBACK; 241 reg_val |= PCH_OPT_LBACK;
253 242
254 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT); 243 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
255 iowrite32(reg_val, &priv->regs->opt); 244 iowrite32(reg_val, &priv->regs->opt);
256} 245}
257 246
258static void pch_can_set_int_custom(struct pch_can_priv *priv) 247static void pch_can_set_int_custom(struct pch_can_priv *priv)
259{ 248{
260 /* Clearing the IE, SIE and EIE bits of Can control register. */ 249 /* Clearing the IE, SIE and EIE bits of Can control register. */
261 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE); 250 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
262 251
263 /* Appropriately setting them. */ 252 /* Appropriately setting them. */
264 pch_can_bit_set(&priv->regs->cont, 253 pch_can_bit_set(&priv->regs->cont,
265 ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1)); 254 ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
266} 255}
267 256
268/* This function retrieves interrupt enabled for the CAN device. */ 257/* This function retrieves interrupt enabled for the CAN device. */
269static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables) 258static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
270{ 259{
271 /* Obtaining the status of IE, SIE and EIE interrupt bits. */ 260 /* Obtaining the status of IE, SIE and EIE interrupt bits. */
272 *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1); 261 *enables = ((ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1);
273} 262}
274 263
275static void pch_can_set_int_enables(struct pch_can_priv *priv, 264static void pch_can_set_int_enables(struct pch_can_priv *priv,
@@ -277,19 +266,19 @@ static void pch_can_set_int_enables(struct pch_can_priv *priv,
277{ 266{
278 switch (interrupt_no) { 267 switch (interrupt_no) {
279 case PCH_CAN_ENABLE: 268 case PCH_CAN_ENABLE:
280 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE); 269 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE);
281 break; 270 break;
282 271
283 case PCH_CAN_DISABLE: 272 case PCH_CAN_DISABLE:
284 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE); 273 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
285 break; 274 break;
286 275
287 case PCH_CAN_ALL: 276 case PCH_CAN_ALL:
288 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE); 277 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
289 break; 278 break;
290 279
291 case PCH_CAN_NONE: 280 case PCH_CAN_NONE:
292 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE); 281 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
293 break; 282 break;
294 283
295 default: 284 default:
@@ -300,12 +289,12 @@ static void pch_can_set_int_enables(struct pch_can_priv *priv,
300 289
301static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num) 290static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
302{ 291{
303 u32 counter = COUNTER_LIMIT; 292 u32 counter = PCH_COUNTER_LIMIT;
304 u32 ifx_creq; 293 u32 ifx_creq;
305 294
306 iowrite32(num, creq_addr); 295 iowrite32(num, creq_addr);
307 while (counter) { 296 while (counter) {
308 ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY; 297 ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
309 if (!ifx_creq) 298 if (!ifx_creq)
310 break; 299 break;
311 counter--; 300 counter--;
@@ -315,143 +304,76 @@ static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
315 pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__); 304 pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
316} 305}
317 306
318static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num, 307static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
319 u32 set) 308 u32 set, enum pch_ifreg dir)
320{ 309{
321 unsigned long flags; 310 u32 ie;
311
312 if (dir)
313 ie = PCH_IF_MCONT_TXIE;
314 else
315 ie = PCH_IF_MCONT_RXIE;
322 316
323 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
324 /* Reading the receive buffer data from RAM to Interface1 registers */ 317 /* Reading the receive buffer data from RAM to Interface1 registers */
325 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 318 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
326 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num); 319 pch_can_check_if_busy(&priv->regs->ifregs[dir].creq, buff_num);
327 320
328 /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */ 321 /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
329 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL, 322 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
330 &priv->regs->if1_cmask); 323 &priv->regs->ifregs[dir].cmask);
331 324
332 if (set == ENABLE) { 325 if (set == PCH_ENABLE) {
333 /* Setting the MsgVal and RxIE bits */ 326 /* Setting the MsgVal and RxIE bits */
334 pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE); 327 pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
335 pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL); 328 pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
336 329
337 } else if (set == DISABLE) { 330 } else if (set == PCH_DISABLE) {
338 /* Resetting the MsgVal and RxIE bits */ 331 /* Resetting the MsgVal and RxIE bits */
339 pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE); 332 pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
340 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL); 333 pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
341 } 334 }
342 335
343 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num); 336 pch_can_check_if_busy(&priv->regs->ifregs[dir].creq, buff_num);
344 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
345} 337}
346 338
347static void pch_can_rx_enable_all(struct pch_can_priv *priv) 339static void pch_can_set_rx_all(struct pch_can_priv *priv, u32 set)
348{ 340{
349 int i; 341 int i;
350 342
351 /* Traversing to obtain the object configured as receivers. */ 343 /* Traversing to obtain the object configured as receivers. */
352 for (i = 0; i < PCH_OBJ_NUM; i++) { 344 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
353 if (priv->msg_obj[i] == MSG_OBJ_RX) 345 pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
354 pch_can_set_rx_enable(priv, i + 1, ENABLE);
355 }
356}
357
358static void pch_can_rx_disable_all(struct pch_can_priv *priv)
359{
360 int i;
361
362 /* Traversing to obtain the object configured as receivers. */
363 for (i = 0; i < PCH_OBJ_NUM; i++) {
364 if (priv->msg_obj[i] == MSG_OBJ_RX)
365 pch_can_set_rx_enable(priv, i + 1, DISABLE);
366 }
367}
368
369static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
370 u32 set)
371{
372 unsigned long flags;
373
374 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
375 /* Reading the Msg buffer from Message RAM to Interface2 registers. */
376 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
377 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
378
379 /* Setting the IF2CMASK register for accessing the
380 MsgVal and TxIE bits */
381 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
382 &priv->regs->if2_cmask);
383
384 if (set == ENABLE) {
385 /* Setting the MsgVal and TxIE bits */
386 pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
387 pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
388 } else if (set == DISABLE) {
389 /* Resetting the MsgVal and TxIE bits. */
390 pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
391 pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
392 }
393
394 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
395 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
396} 346}
397 347
398static void pch_can_tx_enable_all(struct pch_can_priv *priv) 348static void pch_can_set_tx_all(struct pch_can_priv *priv, u32 set)
399{ 349{
400 int i; 350 int i;
401 351
402 /* Traversing to obtain the object configured as transmit object. */ 352 /* Traversing to obtain the object configured as transmit object. */
403 for (i = 0; i < PCH_OBJ_NUM; i++) { 353 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
404 if (priv->msg_obj[i] == MSG_OBJ_TX) 354 pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
405 pch_can_set_tx_enable(priv, i + 1, ENABLE);
406 }
407}
408
409static void pch_can_tx_disable_all(struct pch_can_priv *priv)
410{
411 int i;
412
413 /* Traversing to obtain the object configured as transmit object. */
414 for (i = 0; i < PCH_OBJ_NUM; i++) {
415 if (priv->msg_obj[i] == MSG_OBJ_TX)
416 pch_can_set_tx_enable(priv, i + 1, DISABLE);
417 }
418} 355}
419 356
420static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num, 357static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
421 u32 *enable) 358 enum pch_ifreg dir)
422{ 359{
423 unsigned long flags; 360 u32 ie, enable;
424 361
425 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 362 if (dir)
426 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 363 ie = PCH_IF_MCONT_RXIE;
427 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
428
429 if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
430 ((ioread32(&priv->regs->if1_mcont)) &
431 CAN_IF_MCONT_RXIE))
432 *enable = ENABLE;
433 else 364 else
434 *enable = DISABLE; 365 ie = PCH_IF_MCONT_TXIE;
435 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
436}
437 366
438static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num, 367 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
439 u32 *enable) 368 pch_can_check_if_busy(&priv->regs->ifregs[dir].creq, buff_num);
440{
441 unsigned long flags;
442
443 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
444 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
445 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
446 369
447 if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) && 370 if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
448 ((ioread32(&priv->regs->if2_mcont)) & 371 ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie)) {
449 CAN_IF_MCONT_TXIE)) { 372 enable = 1;
450 *enable = ENABLE;
451 } else { 373 } else {
452 *enable = DISABLE; 374 enable = 0;
453 } 375 }
454 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags); 376 return enable;
455} 377}
456 378
457static int pch_can_int_pending(struct pch_can_priv *priv) 379static int pch_can_int_pending(struct pch_can_priv *priv)
@@ -462,141 +384,131 @@ static int pch_can_int_pending(struct pch_can_priv *priv)
462static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv, 384static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
463 u32 buffer_num, u32 set) 385 u32 buffer_num, u32 set)
464{ 386{
465 unsigned long flags; 387 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
466 388 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, buffer_num);
467 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 389 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
468 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 390 &priv->regs->ifregs[0].cmask);
469 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num); 391 if (set == PCH_ENABLE)
470 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask); 392 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
471 if (set == ENABLE) 393 PCH_IF_MCONT_EOB);
472 pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
473 else 394 else
474 pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB); 395 pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);
475 396
476 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num); 397 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, buffer_num);
477 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
478} 398}
479 399
480static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv, 400static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
481 u32 buffer_num, u32 *link) 401 u32 buffer_num, u32 *link)
482{ 402{
483 unsigned long flags; 403 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
484 404 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, buffer_num);
485 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
486 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
487 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
488 405
489 if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB) 406 if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
490 *link = DISABLE; 407 *link = PCH_DISABLE;
491 else 408 else
492 *link = ENABLE; 409 *link = PCH_ENABLE;
493 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
494} 410}
495 411
496static void pch_can_clear_buffers(struct pch_can_priv *priv) 412static void pch_can_clear_buffers(struct pch_can_priv *priv)
497{ 413{
498 int i; 414 int i;
499 415
500 for (i = 0; i < PCH_RX_OBJ_NUM; i++) { 416 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
501 iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask); 417 iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
502 iowrite32(0xffff, &priv->regs->if1_mask1); 418 iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
503 iowrite32(0xffff, &priv->regs->if1_mask2); 419 iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
504 iowrite32(0x0, &priv->regs->if1_id1); 420 iowrite32(0x0, &priv->regs->ifregs[0].id1);
505 iowrite32(0x0, &priv->regs->if1_id2); 421 iowrite32(0x0, &priv->regs->ifregs[0].id2);
506 iowrite32(0x0, &priv->regs->if1_mcont); 422 iowrite32(0x0, &priv->regs->ifregs[0].mcont);
507 iowrite32(0x0, &priv->regs->if1_dataa1); 423 iowrite32(0x0, &priv->regs->ifregs[0].dataa1);
508 iowrite32(0x0, &priv->regs->if1_dataa2); 424 iowrite32(0x0, &priv->regs->ifregs[0].dataa2);
509 iowrite32(0x0, &priv->regs->if1_datab1); 425 iowrite32(0x0, &priv->regs->ifregs[0].datab1);
510 iowrite32(0x0, &priv->regs->if1_datab2); 426 iowrite32(0x0, &priv->regs->ifregs[0].datab2);
511 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK | 427 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
512 CAN_CMASK_ARB | CAN_CMASK_CTRL, 428 PCH_CMASK_ARB | PCH_CMASK_CTRL,
513 &priv->regs->if1_cmask); 429 &priv->regs->ifregs[0].cmask);
514 pch_can_check_if_busy(&priv->regs->if1_creq, i+1); 430 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, i);
515 } 431 }
516 432
517 for (i = i; i < PCH_OBJ_NUM; i++) { 433 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
518 iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask); 434 iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[1].cmask);
519 iowrite32(0xffff, &priv->regs->if2_mask1); 435 iowrite32(0xffff, &priv->regs->ifregs[1].mask1);
520 iowrite32(0xffff, &priv->regs->if2_mask2); 436 iowrite32(0xffff, &priv->regs->ifregs[1].mask2);
521 iowrite32(0x0, &priv->regs->if2_id1); 437 iowrite32(0x0, &priv->regs->ifregs[1].id1);
522 iowrite32(0x0, &priv->regs->if2_id2); 438 iowrite32(0x0, &priv->regs->ifregs[1].id2);
523 iowrite32(0x0, &priv->regs->if2_mcont); 439 iowrite32(0x0, &priv->regs->ifregs[1].mcont);
524 iowrite32(0x0, &priv->regs->if2_dataa1); 440 iowrite32(0x0, &priv->regs->ifregs[1].dataa1);
525 iowrite32(0x0, &priv->regs->if2_dataa2); 441 iowrite32(0x0, &priv->regs->ifregs[1].dataa2);
526 iowrite32(0x0, &priv->regs->if2_datab1); 442 iowrite32(0x0, &priv->regs->ifregs[1].datab1);
527 iowrite32(0x0, &priv->regs->if2_datab2); 443 iowrite32(0x0, &priv->regs->ifregs[1].datab2);
528 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK | 444 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
529 CAN_CMASK_ARB | CAN_CMASK_CTRL, 445 PCH_CMASK_ARB | PCH_CMASK_CTRL,
530 &priv->regs->if2_cmask); 446 &priv->regs->ifregs[1].cmask);
531 pch_can_check_if_busy(&priv->regs->if2_creq, i+1); 447 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, i);
532 } 448 }
533} 449}
534 450
535static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv) 451static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
536{ 452{
537 int i; 453 int i;
538 unsigned long flags;
539 454
540 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 455 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
456 iowrite32(PCH_CMASK_RX_TX_GET,
457 &priv->regs->ifregs[0].cmask);
458 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, i);
541 459
542 for (i = 0; i < PCH_OBJ_NUM; i++) { 460 iowrite32(0x0, &priv->regs->ifregs[0].id1);
543 if (priv->msg_obj[i] == MSG_OBJ_RX) { 461 iowrite32(0x0, &priv->regs->ifregs[0].id2);
544 iowrite32(CAN_CMASK_RX_TX_GET,
545 &priv->regs->if1_cmask);
546 pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
547 462
548 iowrite32(0x0, &priv->regs->if1_id1); 463 pch_can_bit_set(&priv->regs->ifregs[0].mcont,
549 iowrite32(0x0, &priv->regs->if1_id2); 464 PCH_IF_MCONT_UMASK);
550 465
551 pch_can_bit_set(&priv->regs->if1_mcont, 466 /* Set FIFO mode set to 0 except last Rx Obj*/
552 CAN_IF_MCONT_UMASK); 467 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
468 PCH_IF_MCONT_EOB);
469 /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
470 if (i == PCH_RX_OBJ_END)
471 pch_can_bit_set(&priv->regs->ifregs[0].mcont,
472 PCH_IF_MCONT_EOB);
553 473
554 /* Set FIFO mode set to 0 except last Rx Obj*/ 474 iowrite32(0, &priv->regs->ifregs[0].mask1);
555 pch_can_bit_clear(&priv->regs->if1_mcont, 475 pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
556 CAN_IF_MCONT_EOB); 476 0x1fff | PCH_MASK2_MDIR_MXTD);
557 /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
558 if (i == (PCH_RX_OBJ_NUM - 1))
559 pch_can_bit_set(&priv->regs->if1_mcont,
560 CAN_IF_MCONT_EOB);
561 477
562 iowrite32(0, &priv->regs->if1_mask1); 478 /* Setting CMASK for writing */
563 pch_can_bit_clear(&priv->regs->if1_mask2, 479 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
564 0x1fff | CAN_MASK2_MDIR_MXTD); 480 PCH_CMASK_ARB | PCH_CMASK_CTRL,
481 &priv->regs->ifregs[0].cmask);
565 482
566 /* Setting CMASK for writing */ 483 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, i);
567 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK | 484 }
568 CAN_CMASK_ARB | CAN_CMASK_CTRL,
569 &priv->regs->if1_cmask);
570 485
571 pch_can_check_if_busy(&priv->regs->if1_creq, i+1); 486 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
572 } else if (priv->msg_obj[i] == MSG_OBJ_TX) { 487 iowrite32(PCH_CMASK_RX_TX_GET,
573 iowrite32(CAN_CMASK_RX_TX_GET, 488 &priv->regs->ifregs[1].cmask);
574 &priv->regs->if2_cmask); 489 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, i);
575 pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
576 490
577 /* Resetting DIR bit for reception */ 491 /* Resetting DIR bit for reception */
578 iowrite32(0x0, &priv->regs->if2_id1); 492 iowrite32(0x0, &priv->regs->ifregs[1].id1);
579 iowrite32(0x0, &priv->regs->if2_id2); 493 iowrite32(0x0, &priv->regs->ifregs[1].id2);
580 pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR); 494 pch_can_bit_set(&priv->regs->ifregs[1].id2, PCH_ID2_DIR);
581 495
582 /* Setting EOB bit for transmitter */ 496 /* Setting EOB bit for transmitter */
583 iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont); 497 iowrite32(PCH_IF_MCONT_EOB, &priv->regs->ifregs[1].mcont);
584 498
585 pch_can_bit_set(&priv->regs->if2_mcont, 499 pch_can_bit_set(&priv->regs->ifregs[1].mcont,
586 CAN_IF_MCONT_UMASK); 500 PCH_IF_MCONT_UMASK);
587 501
588 iowrite32(0, &priv->regs->if2_mask1); 502 iowrite32(0, &priv->regs->ifregs[1].mask1);
589 pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff); 503 pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);
590 504
591 /* Setting CMASK for writing */ 505 /* Setting CMASK for writing */
592 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK | 506 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
593 CAN_CMASK_ARB | CAN_CMASK_CTRL, 507 PCH_CMASK_ARB | PCH_CMASK_CTRL,
594 &priv->regs->if2_cmask); 508 &priv->regs->ifregs[1].cmask);
595 509
596 pch_can_check_if_busy(&priv->regs->if2_creq, i+1); 510 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, i);
597 }
598 } 511 }
599 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
600} 512}
601 513
602static void pch_can_init(struct pch_can_priv *priv) 514static void pch_can_init(struct pch_can_priv *priv)
@@ -623,50 +535,50 @@ static void pch_can_release(struct pch_can_priv *priv)
623 pch_can_set_int_enables(priv, PCH_CAN_NONE); 535 pch_can_set_int_enables(priv, PCH_CAN_NONE);
624 536
625 /* Disabling all the receive object. */ 537 /* Disabling all the receive object. */
626 pch_can_rx_disable_all(priv); 538 pch_can_set_rx_all(priv, 0);
627 539
628 /* Disabling all the transmit object. */ 540 /* Disabling all the transmit object. */
629 pch_can_tx_disable_all(priv); 541 pch_can_set_tx_all(priv, 0);
630} 542}
631 543
632/* This function clears interrupt(s) from the CAN device. */ 544/* This function clears interrupt(s) from the CAN device. */
633static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask) 545static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
634{ 546{
635 if (mask == CAN_STATUS_INT) { 547 if (mask == PCH_STATUS_INT) {
636 ioread32(&priv->regs->stat); 548 ioread32(&priv->regs->stat);
637 return; 549 return;
638 } 550 }
639 551
640 /* Clear interrupt for transmit object */ 552 /* Clear interrupt for transmit object */
641 if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) { 553 if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
642 /* Setting CMASK for clearing interrupts for
643 frame transmission. */
644 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
645 &priv->regs->if2_cmask);
646
647 /* Resetting the ID registers. */
648 pch_can_bit_set(&priv->regs->if2_id2,
649 CAN_ID2_DIR | (0x7ff << 2));
650 iowrite32(0x0, &priv->regs->if2_id1);
651
652 /* Claring NewDat, TxRqst & IntPnd */
653 pch_can_bit_clear(&priv->regs->if2_mcont,
654 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
655 CAN_IF_MCONT_TXRQXT);
656 pch_can_check_if_busy(&priv->regs->if2_creq, mask);
657 } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
658 /* Setting CMASK for clearing the reception interrupts. */ 554 /* Setting CMASK for clearing the reception interrupts. */
659 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB, 555 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
660 &priv->regs->if1_cmask); 556 &priv->regs->ifregs[0].cmask);
661 557
662 /* Clearing the Dir bit. */ 558 /* Clearing the Dir bit. */
663 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR); 559 pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
664 560
665 /* Clearing NewDat & IntPnd */ 561 /* Clearing NewDat & IntPnd */
666 pch_can_bit_clear(&priv->regs->if1_mcont, 562 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
667 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND); 563 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);
564
565 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, mask);
566 } else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
567 /* Setting CMASK for clearing interrupts for
568 frame transmission. */
569 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
570 &priv->regs->ifregs[1].cmask);
571
572 /* Resetting the ID registers. */
573 pch_can_bit_set(&priv->regs->ifregs[1].id2,
574 PCH_ID2_DIR | (0x7ff << 2));
575 iowrite32(0x0, &priv->regs->ifregs[1].id1);
668 576
669 pch_can_check_if_busy(&priv->regs->if1_creq, mask); 577 /* Claring NewDat, TxRqst & IntPnd */
578 pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
579 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
580 PCH_IF_MCONT_TXRQXT);
581 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, mask);
670 } 582 }
671} 583}
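pch_can_int_clr() above no longer consults the removed msg_obj[] table; the object number alone now says whether an interrupt belongs to a receive or a transmit object. A tiny sketch of that classification is below, with placeholder range values (the real PCH_STATUS_INT and PCH_*_OBJ_START/END definitions sit outside this hunk; only the 1..32 total is visible in the rx_poll loop further down, and the split chosen here is an arbitrary illustration).

enum demo_int_src { DEMO_SRC_STATUS, DEMO_SRC_RX, DEMO_SRC_TX, DEMO_SRC_NONE };

#define DEMO_STATUS_INT   0xffffu	/* placeholder for PCH_STATUS_INT */
#define DEMO_RX_OBJ_START 1u
#define DEMO_RX_OBJ_END   16u
#define DEMO_TX_OBJ_START 17u
#define DEMO_TX_OBJ_END   32u

static enum demo_int_src demo_classify_int(unsigned int mask)
{
	if (mask == DEMO_STATUS_INT)
		return DEMO_SRC_STATUS;
	if (mask >= DEMO_RX_OBJ_START && mask <= DEMO_RX_OBJ_END)
		return DEMO_SRC_RX;
	if (mask >= DEMO_TX_OBJ_START && mask <= DEMO_TX_OBJ_END)
		return DEMO_SRC_TX;
	return DEMO_SRC_NONE;		/* not a message-object interrupt */
}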
672 584
@@ -688,7 +600,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
688 struct sk_buff *skb; 600 struct sk_buff *skb;
689 struct pch_can_priv *priv = netdev_priv(ndev); 601 struct pch_can_priv *priv = netdev_priv(ndev);
690 struct can_frame *cf; 602 struct can_frame *cf;
691 u32 errc; 603 u32 errc, lec;
692 struct net_device_stats *stats = &(priv->ndev->stats); 604 struct net_device_stats *stats = &(priv->ndev->stats);
693 enum can_state state = priv->can.state; 605 enum can_state state = priv->can.state;
694 606
@@ -697,8 +609,8 @@ static void pch_can_error(struct net_device *ndev, u32 status)
697 return; 609 return;
698 610
699 if (status & PCH_BUS_OFF) { 611 if (status & PCH_BUS_OFF) {
700 pch_can_tx_disable_all(priv); 612 pch_can_set_tx_all(priv, 0);
701 pch_can_rx_disable_all(priv); 613 pch_can_set_rx_all(priv, 0);
702 state = CAN_STATE_BUS_OFF; 614 state = CAN_STATE_BUS_OFF;
703 cf->can_id |= CAN_ERR_BUSOFF; 615 cf->can_id |= CAN_ERR_BUSOFF;
704 can_bus_off(ndev); 616 can_bus_off(ndev);
@@ -712,9 +624,9 @@ static void pch_can_error(struct net_device *ndev, u32 status)
712 priv->can.can_stats.error_warning++; 624 priv->can.can_stats.error_warning++;
713 cf->can_id |= CAN_ERR_CRTL; 625 cf->can_id |= CAN_ERR_CRTL;
714 errc = ioread32(&priv->regs->errc); 626 errc = ioread32(&priv->regs->errc);
715 if (((errc & CAN_REC) >> 8) > 96) 627 if (((errc & PCH_REC) >> 8) > 96)
716 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; 628 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
717 if ((errc & CAN_TEC) > 96) 629 if ((errc & PCH_TEC) > 96)
718 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; 630 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
719 dev_warn(&ndev->dev, 631 dev_warn(&ndev->dev,
720 "%s -> Error Counter is more than 96.\n", __func__); 632 "%s -> Error Counter is more than 96.\n", __func__);
@@ -725,41 +637,45 @@ static void pch_can_error(struct net_device *ndev, u32 status)
725 state = CAN_STATE_ERROR_PASSIVE; 637 state = CAN_STATE_ERROR_PASSIVE;
726 cf->can_id |= CAN_ERR_CRTL; 638 cf->can_id |= CAN_ERR_CRTL;
727 errc = ioread32(&priv->regs->errc); 639 errc = ioread32(&priv->regs->errc);
728 if (((errc & CAN_REC) >> 8) > 127) 640 if (((errc & PCH_REC) >> 8) > 127)
729 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; 641 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
730 if ((errc & CAN_TEC) > 127) 642 if ((errc & PCH_TEC) > 127)
731 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; 643 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
732 dev_err(&ndev->dev, 644 dev_err(&ndev->dev,
733 "%s -> CAN controller is ERROR PASSIVE .\n", __func__); 645 "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
734 } 646 }
735 647
736 if (status & PCH_LEC_ALL) { 648 lec = status & PCH_LEC_ALL;
649 switch (lec) {
650 case PCH_STUF_ERR:
651 cf->data[2] |= CAN_ERR_PROT_STUFF;
737 priv->can.can_stats.bus_error++; 652 priv->can.can_stats.bus_error++;
738 stats->rx_errors++; 653 stats->rx_errors++;
739 switch (status & PCH_LEC_ALL) { 654 break;
740 case PCH_STUF_ERR: 655 case PCH_FORM_ERR:
741 cf->data[2] |= CAN_ERR_PROT_STUFF; 656 cf->data[2] |= CAN_ERR_PROT_FORM;
742 break; 657 priv->can.can_stats.bus_error++;
743 case PCH_FORM_ERR: 658 stats->rx_errors++;
744 cf->data[2] |= CAN_ERR_PROT_FORM; 659 break;
745 break; 660 case PCH_ACK_ERR:
746 case PCH_ACK_ERR: 661 cf->can_id |= CAN_ERR_ACK;
747 cf->data[2] |= CAN_ERR_PROT_LOC_ACK | 662 priv->can.can_stats.bus_error++;
748 CAN_ERR_PROT_LOC_ACK_DEL; 663 stats->rx_errors++;
749 break; 664 break;
750 case PCH_BIT1_ERR: 665 case PCH_BIT1_ERR:
751 case PCH_BIT0_ERR: 666 case PCH_BIT0_ERR:
752 cf->data[2] |= CAN_ERR_PROT_BIT; 667 cf->data[2] |= CAN_ERR_PROT_BIT;
753 break; 668 priv->can.can_stats.bus_error++;
754 case PCH_CRC_ERR: 669 stats->rx_errors++;
755 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | 670 break;
756 CAN_ERR_PROT_LOC_CRC_DEL; 671 case PCH_CRC_ERR:
757 break; 672 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
758 default: 673 CAN_ERR_PROT_LOC_CRC_DEL;
759 iowrite32(status | PCH_LEC_ALL, &priv->regs->stat); 674 priv->can.can_stats.bus_error++;
760 break; 675 stats->rx_errors++;
761 } 676 break;
762 677 case PCH_LEC_ALL: /* Written by CPU. No error status */
678 break;
763 } 679 }
764 680
765 priv->can.state = state; 681 priv->can.state = state;
@@ -795,22 +711,22 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
795 struct net_device_stats *stats = &(priv->ndev->stats); 711 struct net_device_stats *stats = &(priv->ndev->stats);
796 712
797 /* Reading the messsage object from the Message RAM */ 713 /* Reading the messsage object from the Message RAM */
798 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 714 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
799 pch_can_check_if_busy(&priv->regs->if1_creq, int_stat); 715 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, int_stat);
800 716
801 /* Reading the MCONT register. */ 717 /* Reading the MCONT register. */
802 reg = ioread32(&priv->regs->if1_mcont); 718 reg = ioread32(&priv->regs->ifregs[0].mcont);
803 reg &= 0xffff; 719 reg &= 0xffff;
804 720
805 for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) { 721 for (k = int_stat; !(reg & PCH_IF_MCONT_EOB); k++) {
806 /* If MsgLost bit set. */ 722 /* If MsgLost bit set. */
807 if (reg & CAN_IF_MCONT_MSGLOST) { 723 if (reg & PCH_IF_MCONT_MSGLOST) {
808 dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n"); 724 dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
809 pch_can_bit_clear(&priv->regs->if1_mcont, 725 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
810 CAN_IF_MCONT_MSGLOST); 726 PCH_IF_MCONT_MSGLOST);
811 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, 727 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
812 &priv->regs->if1_cmask); 728 &priv->regs->ifregs[0].cmask);
813 pch_can_check_if_busy(&priv->regs->if1_creq, k); 729 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, k);
814 730
815 skb = alloc_can_err_skb(ndev, &cf); 731 skb = alloc_can_err_skb(ndev, &cf);
816 if (!skb) 732 if (!skb)
@@ -828,7 +744,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
828 rcv_pkts++; 744 rcv_pkts++;
829 goto RX_NEXT; 745 goto RX_NEXT;
830 } 746 }
831 if (!(reg & CAN_IF_MCONT_NEWDAT)) 747 if (!(reg & PCH_IF_MCONT_NEWDAT))
832 goto RX_NEXT; 748 goto RX_NEXT;
833 749
834 skb = alloc_can_skb(priv->ndev, &cf); 750 skb = alloc_can_skb(priv->ndev, &cf);
@@ -836,29 +752,30 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
836 return -ENOMEM; 752 return -ENOMEM;
837 753
838 /* Get Received data */ 754 /* Get Received data */
839 ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14; 755 ide = ((ioread32(&priv->regs->ifregs[0].id2)) & PCH_ID2_XTD) >>
756 14;
840 if (ide) { 757 if (ide) {
841 id = (ioread32(&priv->regs->if1_id1) & 0xffff); 758 id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
842 id |= (((ioread32(&priv->regs->if1_id2)) & 759 id |= (((ioread32(&priv->regs->ifregs[0].id2)) &
843 0x1fff) << 16); 760 0x1fff) << 16);
844 cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; 761 cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
845 } else { 762 } else {
846 id = (((ioread32(&priv->regs->if1_id2)) & 763 id = (((ioread32(&priv->regs->ifregs[0].id2)) &
847 (CAN_SFF_MASK << 2)) >> 2); 764 (CAN_SFF_MASK << 2)) >> 2);
848 cf->can_id = (id & CAN_SFF_MASK); 765 cf->can_id = (id & CAN_SFF_MASK);
849 } 766 }
850 767
851 rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR); 768 rtr = (ioread32(&priv->regs->ifregs[0].id2) & PCH_ID2_DIR);
852 if (rtr) { 769 if (rtr) {
853 cf->can_dlc = 0; 770 cf->can_dlc = 0;
854 cf->can_id |= CAN_RTR_FLAG; 771 cf->can_id |= CAN_RTR_FLAG;
855 } else { 772 } else {
856 cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) & 773 cf->can_dlc =
857 0x0f); 774 ((ioread32(&priv->regs->ifregs[0].mcont)) & 0x0f);
858 } 775 }
859 776
860 for (i = 0, j = 0; i < cf->can_dlc; j++) { 777 for (i = 0, j = 0; i < cf->can_dlc; j++) {
861 reg = ioread32(&priv->regs->if1_dataa1 + j*4); 778 reg = ioread32(&priv->regs->ifregs[0].dataa1 + j*4);
862 cf->data[i++] = cpu_to_le32(reg & 0xff); 779 cf->data[i++] = cpu_to_le32(reg & 0xff);
863 if (i == cf->can_dlc) 780 if (i == cf->can_dlc)
864 break; 781 break;
@@ -871,16 +788,17 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
871 stats->rx_bytes += cf->can_dlc; 788 stats->rx_bytes += cf->can_dlc;
872 789
873 if (k < PCH_FIFO_THRESH) { 790 if (k < PCH_FIFO_THRESH) {
874 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | 791 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
875 CAN_CMASK_ARB, &priv->regs->if1_cmask); 792 PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);
876 793
877 /* Clearing the Dir bit. */ 794 /* Clearing the Dir bit. */
878 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR); 795 pch_can_bit_clear(&priv->regs->ifregs[0].id2,
796 PCH_ID2_DIR);
879 797
880 /* Clearing NewDat & IntPnd */ 798 /* Clearing NewDat & IntPnd */
881 pch_can_bit_clear(&priv->regs->if1_mcont, 799 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
882 CAN_IF_MCONT_INTPND); 800 PCH_IF_MCONT_INTPND);
883 pch_can_check_if_busy(&priv->regs->if1_creq, k); 801 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, k);
884 } else if (k > PCH_FIFO_THRESH) { 802 } else if (k > PCH_FIFO_THRESH) {
885 pch_can_int_clr(priv, k); 803 pch_can_int_clr(priv, k);
886 } else if (k == PCH_FIFO_THRESH) { 804 } else if (k == PCH_FIFO_THRESH) {
@@ -890,9 +808,9 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
890 } 808 }
891RX_NEXT: 809RX_NEXT:
892 /* Reading the messsage object from the Message RAM */ 810 /* Reading the messsage object from the Message RAM */
893 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 811 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
894 pch_can_check_if_busy(&priv->regs->if1_creq, k + 1); 812 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, k);
895 reg = ioread32(&priv->regs->if1_mcont); 813 reg = ioread32(&priv->regs->ifregs[0].mcont);
896 } 814 }
897 815
898 return rcv_pkts; 816 return rcv_pkts;
@@ -906,14 +824,13 @@ static int pch_can_rx_poll(struct napi_struct *napi, int quota)
906 u32 int_stat; 824 u32 int_stat;
907 int rcv_pkts = 0; 825 int rcv_pkts = 0;
908 u32 reg_stat; 826 u32 reg_stat;
909 unsigned long flags;
910 827
911 int_stat = pch_can_int_pending(priv); 828 int_stat = pch_can_int_pending(priv);
912 if (!int_stat) 829 if (!int_stat)
913 return 0; 830 return 0;
914 831
915INT_STAT: 832INT_STAT:
916 if (int_stat == CAN_STATUS_INT) { 833 if (int_stat == PCH_STATUS_INT) {
917 reg_stat = ioread32(&priv->regs->stat); 834 reg_stat = ioread32(&priv->regs->stat);
918 if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) { 835 if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
919 if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL) 836 if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
@@ -921,11 +838,10 @@ INT_STAT:
921 } 838 }
922 839
923 if (reg_stat & PCH_TX_OK) { 840 if (reg_stat & PCH_TX_OK) {
924 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 841 iowrite32(PCH_CMASK_RX_TX_GET,
925 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask); 842 &priv->regs->ifregs[1].cmask);
926 pch_can_check_if_busy(&priv->regs->if2_creq, 843 pch_can_check_if_busy(&priv->regs->ifregs[1].creq,
927 ioread32(&priv->regs->intr)); 844 ioread32(&priv->regs->intr));
928 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
929 pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK); 845 pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
930 } 846 }
931 847
@@ -933,37 +849,32 @@ INT_STAT:
933 pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK); 849 pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
934 850
935 int_stat = pch_can_int_pending(priv); 851 int_stat = pch_can_int_pending(priv);
936 if (int_stat == CAN_STATUS_INT) 852 if (int_stat == PCH_STATUS_INT)
937 goto INT_STAT; 853 goto INT_STAT;
938 } 854 }
939 855
940MSG_OBJ: 856MSG_OBJ:
941 if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) { 857 if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
942 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
943 rcv_pkts = pch_can_rx_normal(ndev, int_stat); 858 rcv_pkts = pch_can_rx_normal(ndev, int_stat);
944 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
945 if (rcv_pkts < 0) 859 if (rcv_pkts < 0)
946 return 0; 860 return 0;
947 } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) { 861 } else if ((int_stat >= PCH_TX_OBJ_START) &&
948 if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) { 862 (int_stat <= PCH_TX_OBJ_END)) {
949 /* Handle transmission interrupt */ 863 /* Handle transmission interrupt */
950 can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1); 864 can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
951 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 865 iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
952 iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND, 866 &priv->regs->ifregs[1].cmask);
953 &priv->regs->if2_cmask); 867 dlc = ioread32(&priv->regs->ifregs[1].mcont) &
954 dlc = ioread32(&priv->regs->if2_mcont) & 868 PCH_IF_MCONT_DLC;
955 CAN_IF_MCONT_DLC; 869 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, int_stat);
956 pch_can_check_if_busy(&priv->regs->if2_creq, int_stat); 870 if (dlc > 8)
957 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags); 871 dlc = 8;
958 if (dlc > 8) 872 stats->tx_bytes += dlc;
959 dlc = 8; 873 stats->tx_packets++;
960 stats->tx_bytes += dlc;
961 stats->tx_packets++;
962 }
963 } 874 }
964 875
965 int_stat = pch_can_int_pending(priv); 876 int_stat = pch_can_int_pending(priv);
966 if (int_stat == CAN_STATUS_INT) 877 if (int_stat == PCH_STATUS_INT)
967 goto INT_STAT; 878 goto INT_STAT;
968 else if (int_stat >= 1 && int_stat <= 32) 879 else if (int_stat >= 1 && int_stat <= 32)
969 goto MSG_OBJ; 880 goto MSG_OBJ;
@@ -983,17 +894,17 @@ static int pch_set_bittiming(struct net_device *ndev)
983 u32 brp; 894 u32 brp;
984 895
985 /* Setting the CCE bit for accessing the Can Timing register. */ 896 /* Setting the CCE bit for accessing the Can Timing register. */
986 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE); 897 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
987 898
988 brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1; 899 brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
989 canbit = brp & MSK_BITT_BRP; 900 canbit = brp & PCH_MSK_BITT_BRP;
990 canbit |= (bt->sjw - 1) << BIT_BITT_SJW; 901 canbit |= (bt->sjw - 1) << PCH_BIT_SJW;
991 canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1; 902 canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1;
992 canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2; 903 canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2;
993 bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE; 904 bepe = (brp & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE;
994 iowrite32(canbit, &priv->regs->bitt); 905 iowrite32(canbit, &priv->regs->bitt);
995 iowrite32(bepe, &priv->regs->brpe); 906 iowrite32(bepe, &priv->regs->brpe);
996 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE); 907 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
997 908
998 return 0; 909 return 0;
999} 910}
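pch_set_bittiming() above converts the generic can_bittiming values into the controller's BITT/BRPE registers: the prescaler comes from the time-quantum length and the CAN clock, and every field is stored minus one. A small standalone sketch of that arithmetic follows; the 50 MHz clock, the 6-bit prescaler mask and the shift positions are illustrative assumptions, since the PCH_* constants themselves are defined outside this hunk (the extra prescaler bits that go into BRPE are omitted here).

#include <stdint.h>
#include <stdio.h>

#define DEMO_CAN_CLK_HZ  50000000u	/* assumed clock, not necessarily PCH_CAN_CLK */
#define DEMO_SJW_SHIFT   6		/* illustrative shifts, not PCH_BIT_* */
#define DEMO_TSEG1_SHIFT 8
#define DEMO_TSEG2_SHIFT 12

static uint32_t demo_pack_bitt(uint32_t tq_ns, uint32_t prop_seg,
			       uint32_t phase_seg1, uint32_t phase_seg2,
			       uint32_t sjw)
{
	/* Prescaler: CAN-clock ticks per time quantum, stored minus one */
	uint32_t brp = tq_ns / (1000000000u / DEMO_CAN_CLK_HZ) - 1;
	uint32_t reg = brp & 0x3fu;		/* assumed 6-bit BRP field */

	reg |= (sjw - 1) << DEMO_SJW_SHIFT;
	reg |= (prop_seg + phase_seg1 - 1) << DEMO_TSEG1_SHIFT;
	reg |= (phase_seg2 - 1) << DEMO_TSEG2_SHIFT;
	return reg;
}

int main(void)
{
	/* e.g. 500 kbit/s with 20 tq per bit -> tq = 100 ns -> BRP field = 4 */
	printf("BITT = 0x%08x\n", demo_pack_bitt(100, 8, 7, 4, 1));
	return 0;
}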
@@ -1008,8 +919,8 @@ static void pch_can_start(struct net_device *ndev)
1008 pch_set_bittiming(ndev); 919 pch_set_bittiming(ndev);
1009 pch_can_set_optmode(priv); 920 pch_can_set_optmode(priv);
1010 921
1011 pch_can_tx_enable_all(priv); 922 pch_can_set_tx_all(priv, 1);
1012 pch_can_rx_enable_all(priv); 923 pch_can_set_rx_all(priv, 1);
1013 924
1014 /* Setting the CAN to run mode. */ 925 /* Setting the CAN to run mode. */
1015 pch_can_set_run_mode(priv, PCH_CAN_RUN); 926 pch_can_set_run_mode(priv, PCH_CAN_RUN);
@@ -1113,7 +1024,6 @@ static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
1113static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev) 1024static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
1114{ 1025{
1115 int i, j; 1026 int i, j;
1116 unsigned long flags;
1117 struct pch_can_priv *priv = netdev_priv(ndev); 1027 struct pch_can_priv *priv = netdev_priv(ndev);
1118 struct can_frame *cf = (struct can_frame *)skb->data; 1028 struct can_frame *cf = (struct can_frame *)skb->data;
1119 int tx_buffer_avail = 0; 1029 int tx_buffer_avail = 0;
@@ -1121,72 +1031,68 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
1121 if (can_dropped_invalid_skb(ndev, skb)) 1031 if (can_dropped_invalid_skb(ndev, skb))
1122 return NETDEV_TX_OK; 1032 return NETDEV_TX_OK;
1123 1033
1124 if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */ 1034 if (priv->tx_obj == PCH_TX_OBJ_END) { /* Point tail Obj */
1125 while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) << 1035 while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
1126 PCH_RX_OBJ_NUM))) 1036 PCH_RX_OBJ_NUM)))
1127 udelay(500); 1037 udelay(500);
1128 1038
1129 priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */ 1039 priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj ID */
1130 tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */ 1040 tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */
1131 } else { 1041 } else {
1132 tx_buffer_avail = priv->tx_obj; 1042 tx_buffer_avail = priv->tx_obj;
1133 } 1043 }
1134 priv->tx_obj++; 1044 priv->tx_obj++;
1135 1045
1136 /* Attaining the lock. */
1137 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
1138
1139 /* Reading the Msg Obj from the Msg RAM to the Interface register. */ 1046 /* Reading the Msg Obj from the Msg RAM to the Interface register. */
1140 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask); 1047 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
1141 pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail); 1048 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, tx_buffer_avail);
1142 1049
1143 /* Setting the CMASK register. */ 1050 /* Setting the CMASK register. */
1144 pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL); 1051 pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);
1145 1052
1146 /* If ID extended is set. */ 1053 /* If ID extended is set. */
1147 pch_can_bit_clear(&priv->regs->if2_id1, 0xffff); 1054 pch_can_bit_clear(&priv->regs->ifregs[1].id1, 0xffff);
1148 pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD); 1055 pch_can_bit_clear(&priv->regs->ifregs[1].id2, 0x1fff | PCH_ID2_XTD);
1149 if (cf->can_id & CAN_EFF_FLAG) { 1056 if (cf->can_id & CAN_EFF_FLAG) {
1150 pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff); 1057 pch_can_bit_set(&priv->regs->ifregs[1].id1,
1151 pch_can_bit_set(&priv->regs->if2_id2, 1058 cf->can_id & 0xffff);
1152 ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD); 1059 pch_can_bit_set(&priv->regs->ifregs[1].id2,
1060 ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD);
1153 } else { 1061 } else {
1154 pch_can_bit_set(&priv->regs->if2_id1, 0); 1062 pch_can_bit_set(&priv->regs->ifregs[1].id1, 0);
1155 pch_can_bit_set(&priv->regs->if2_id2, 1063 pch_can_bit_set(&priv->regs->ifregs[1].id2,
1156 (cf->can_id & CAN_SFF_MASK) << 2); 1064 (cf->can_id & CAN_SFF_MASK) << 2);
1157 } 1065 }
1158 1066
1159 /* If remote frame has to be transmitted.. */ 1067 /* If remote frame has to be transmitted.. */
1160 if (cf->can_id & CAN_RTR_FLAG) 1068 if (cf->can_id & CAN_RTR_FLAG)
1161 pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR); 1069 pch_can_bit_clear(&priv->regs->ifregs[1].id2, PCH_ID2_DIR);
1162 1070
1163 for (i = 0, j = 0; i < cf->can_dlc; j++) { 1071 for (i = 0, j = 0; i < cf->can_dlc; j++) {
1164 iowrite32(le32_to_cpu(cf->data[i++]), 1072 iowrite32(le32_to_cpu(cf->data[i++]),
1165 (&priv->regs->if2_dataa1) + j*4); 1073 (&priv->regs->ifregs[1].dataa1) + j*4);
1166 if (i == cf->can_dlc) 1074 if (i == cf->can_dlc)
1167 break; 1075 break;
1168 iowrite32(le32_to_cpu(cf->data[i++] << 8), 1076 iowrite32(le32_to_cpu(cf->data[i++] << 8),
1169 (&priv->regs->if2_dataa1) + j*4); 1077 (&priv->regs->ifregs[1].dataa1) + j*4);
1170 } 1078 }
1171 1079
1172 can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1); 1080 can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_END - 1);
1173 1081
1174 /* Updating the size of the data. */ 1082 /* Updating the size of the data. */
1175 pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f); 1083 pch_can_bit_clear(&priv->regs->ifregs[1].mcont, 0x0f);
1176 pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc); 1084 pch_can_bit_set(&priv->regs->ifregs[1].mcont, cf->can_dlc);
1177 1085
1178 /* Clearing IntPend, NewDat & TxRqst */ 1086 /* Clearing IntPend, NewDat & TxRqst */
1179 pch_can_bit_clear(&priv->regs->if2_mcont, 1087 pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
1180 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND | 1088 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
1181 CAN_IF_MCONT_TXRQXT); 1089 PCH_IF_MCONT_TXRQXT);
1182 1090
1183 /* Setting NewDat, TxRqst bits */ 1091 /* Setting NewDat, TxRqst bits */
1184 pch_can_bit_set(&priv->regs->if2_mcont, 1092 pch_can_bit_set(&priv->regs->ifregs[1].mcont,
1185 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT); 1093 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT);
1186
1187 pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
1188 1094
1189 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags); 1095 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, tx_buffer_avail);
1190 1096
1191 return NETDEV_TX_OK; 1097 return NETDEV_TX_OK;
1192} 1098}
@@ -1244,27 +1150,20 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
1244 pch_can_set_int_enables(priv, PCH_CAN_DISABLE); 1150 pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
1245 1151
1246 /* Save Tx buffer enable state */ 1152 /* Save Tx buffer enable state */
1247 for (i = 0; i < PCH_OBJ_NUM; i++) { 1153 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
1248 if (priv->msg_obj[i] == MSG_OBJ_TX) 1154 priv->tx_enable[i] = pch_can_get_rxtx_ir(priv, i, PCH_TX_IFREG);
1249 pch_can_get_tx_enable(priv, i + 1,
1250 &(priv->tx_enable[i]));
1251 }
1252 1155
1253 /* Disable all Transmit buffers */ 1156 /* Disable all Transmit buffers */
1254 pch_can_tx_disable_all(priv); 1157 pch_can_set_tx_all(priv, 0);
1255 1158
1256 /* Save Rx buffer enable state */ 1159 /* Save Rx buffer enable state */
1257 for (i = 0; i < PCH_OBJ_NUM; i++) { 1160 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
1258 if (priv->msg_obj[i] == MSG_OBJ_RX) { 1161 priv->rx_enable[i] = pch_can_get_rxtx_ir(priv, i, PCH_RX_IFREG);
1259 pch_can_get_rx_enable(priv, i + 1, 1162 pch_can_get_rx_buffer_link(priv, i, &priv->rx_link[i]);
1260 &(priv->rx_enable[i]));
1261 pch_can_get_rx_buffer_link(priv, i + 1,
1262 &(priv->rx_link[i]));
1263 }
1264 } 1163 }
1265 1164
1266 /* Disable all Receive buffers */ 1165 /* Disable all Receive buffers */
1267 pch_can_rx_disable_all(priv); 1166 pch_can_set_rx_all(priv, 0);
1268 retval = pci_save_state(pdev); 1167 retval = pci_save_state(pdev);
1269 if (retval) { 1168 if (retval) {
1270 dev_err(&pdev->dev, "pci_save_state failed.\n"); 1169 dev_err(&pdev->dev, "pci_save_state failed.\n");
@@ -1312,23 +1211,16 @@ static int pch_can_resume(struct pci_dev *pdev)
1312 pch_can_set_optmode(priv); 1211 pch_can_set_optmode(priv);
1313 1212
1314 /* Enabling the transmit buffer. */ 1213 /* Enabling the transmit buffer. */
1315 for (i = 0; i < PCH_OBJ_NUM; i++) { 1214 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
1316 if (priv->msg_obj[i] == MSG_OBJ_TX) { 1215 pch_can_set_rxtx(priv, i, priv->tx_enable[i], PCH_TX_IFREG);
1317 pch_can_set_tx_enable(priv, i + 1,
1318 priv->tx_enable[i]);
1319 }
1320 }
1321 1216
1322 /* Configuring the receive buffer and enabling them. */ 1217 /* Configuring the receive buffer and enabling them. */
1323 for (i = 0; i < PCH_OBJ_NUM; i++) { 1218 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
1324 if (priv->msg_obj[i] == MSG_OBJ_RX) { 1219 /* Restore buffer link */
1325 /* Restore buffer link */ 1220 pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i]);
1326 pch_can_set_rx_buffer_link(priv, i + 1, 1221
1327 priv->rx_link[i]); 1222 /* Restore buffer enables */
1328 1223 pch_can_set_rxtx(priv, i, priv->rx_enable[i], PCH_RX_IFREG);
1329 /* Restore buffer enables */
1330 pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
1331 }
1332 } 1224 }
1333 1225
1334 /* Enable CAN Interrupts */ 1226 /* Enable CAN Interrupts */
@@ -1349,8 +1241,8 @@ static int pch_can_get_berr_counter(const struct net_device *dev,
1349{ 1241{
1350 struct pch_can_priv *priv = netdev_priv(dev); 1242 struct pch_can_priv *priv = netdev_priv(dev);
1351 1243
1352 bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC; 1244 bec->txerr = ioread32(&priv->regs->errc) & PCH_TEC;
1353 bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8; 1245 bec->rxerr = (ioread32(&priv->regs->errc) & PCH_REC) >> 8;
1354 1246
1355 return 0; 1247 return 0;
1356} 1248}
@@ -1361,7 +1253,6 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1361 struct net_device *ndev; 1253 struct net_device *ndev;
1362 struct pch_can_priv *priv; 1254 struct pch_can_priv *priv;
1363 int rc; 1255 int rc;
1364 int index;
1365 void __iomem *addr; 1256 void __iomem *addr;
1366 1257
1367 rc = pci_enable_device(pdev); 1258 rc = pci_enable_device(pdev);
@@ -1383,7 +1274,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1383 goto probe_exit_ipmap; 1274 goto probe_exit_ipmap;
1384 } 1275 }
1385 1276
1386 ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM); 1277 ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
1387 if (!ndev) { 1278 if (!ndev) {
1388 rc = -ENOMEM; 1279 rc = -ENOMEM;
1389 dev_err(&pdev->dev, "Failed alloc_candev\n"); 1280 dev_err(&pdev->dev, "Failed alloc_candev\n");
@@ -1399,7 +1290,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1399 priv->can.do_get_berr_counter = pch_can_get_berr_counter; 1290 priv->can.do_get_berr_counter = pch_can_get_berr_counter;
1400 priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | 1291 priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
1401 CAN_CTRLMODE_LOOPBACK; 1292 CAN_CTRLMODE_LOOPBACK;
1402 priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */ 1293 priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj */
1403 1294
1404 ndev->irq = pdev->irq; 1295 ndev->irq = pdev->irq;
1405 ndev->flags |= IFF_ECHO; 1296 ndev->flags |= IFF_ECHO;
@@ -1407,15 +1298,9 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1407 pci_set_drvdata(pdev, ndev); 1298 pci_set_drvdata(pdev, ndev);
1408 SET_NETDEV_DEV(ndev, &pdev->dev); 1299 SET_NETDEV_DEV(ndev, &pdev->dev);
1409 ndev->netdev_ops = &pch_can_netdev_ops; 1300 ndev->netdev_ops = &pch_can_netdev_ops;
1410
1411 priv->can.clock.freq = PCH_CAN_CLK; /* Hz */ 1301 priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
1412 for (index = 0; index < PCH_RX_OBJ_NUM;)
1413 priv->msg_obj[index++] = MSG_OBJ_RX;
1414
1415 for (index = index; index < PCH_OBJ_NUM;)
1416 priv->msg_obj[index++] = MSG_OBJ_TX;
1417 1302
1418 netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM); 1303 netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_END);
1419 1304
1420 rc = register_candev(ndev); 1305 rc = register_candev(ndev);
1421 if (rc) { 1306 if (rc) {
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 5bfccfdf3bb..09c3e9db931 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -107,17 +107,13 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev,
107 res_size = resource_size(&res); 107 res_size = resource_size(&res);
108 108
109 if (!request_mem_region(res.start, res_size, DRV_NAME)) { 109 if (!request_mem_region(res.start, res_size, DRV_NAME)) {
110 dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n", 110 dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
111 (unsigned long long)res.start,
112 (unsigned long long)res.end);
113 return -EBUSY; 111 return -EBUSY;
114 } 112 }
115 113
116 base = ioremap_nocache(res.start, res_size); 114 base = ioremap_nocache(res.start, res_size);
117 if (!base) { 115 if (!base) {
118 dev_err(&ofdev->dev, "couldn't ioremap %#llx..%#llx\n", 116 dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
119 (unsigned long long)res.start,
120 (unsigned long long)res.end);
121 err = -ENOMEM; 117 err = -ENOMEM;
122 goto exit_release_mem; 118 goto exit_release_mem;
123 } 119 }
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
new file mode 100644
index 00000000000..420e95ecc19
--- /dev/null
+++ b/drivers/net/can/slcan.c
@@ -0,0 +1,755 @@
1/*
2 * slcan.c - serial line CAN interface driver (using tty line discipline)
3 *
4 * This file is derived from linux/drivers/net/slip.c
5 *
6 * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
7 * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
8 * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place, Suite 330, Boston, MA 02111-1307. You can also get it
23 * at http://www.gnu.org/licenses/gpl.html
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
36 * DAMAGE.
37 *
38 * Send feedback to <socketcan-users@lists.berlios.de>
39 *
40 */
41
42#include <linux/module.h>
43#include <linux/moduleparam.h>
44
45#include <asm/system.h>
46#include <linux/uaccess.h>
47#include <linux/bitops.h>
48#include <linux/string.h>
49#include <linux/tty.h>
50#include <linux/errno.h>
51#include <linux/netdevice.h>
52#include <linux/skbuff.h>
53#include <linux/rtnetlink.h>
54#include <linux/if_arp.h>
55#include <linux/if_ether.h>
56#include <linux/delay.h>
57#include <linux/init.h>
58#include <linux/can.h>
59
60static __initdata const char banner[] =
61 KERN_INFO "slcan: serial line CAN interface driver\n";
62
63MODULE_ALIAS_LDISC(N_SLCAN);
64MODULE_DESCRIPTION("serial line CAN interface");
65MODULE_LICENSE("GPL");
66MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
67
68#define SLCAN_MAGIC 0x53CA
69
70static int maxdev = 10; /* MAX number of SLCAN channels;
71 This can be overridden with
72 insmod slcan.ko maxdev=nnn */
73module_param(maxdev, int, 0);
74MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
75
76/* maximum rx buffer len: extended CAN frame with timestamp */
77#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
78
79struct slcan {
80 int magic;
81
82 /* Various fields. */
83 struct tty_struct *tty; /* ptr to TTY structure */
84 struct net_device *dev; /* easy for intr handling */
85 spinlock_t lock;
86
87 /* These are pointers to the malloc()ed frame buffers. */
88 unsigned char rbuff[SLC_MTU]; /* receiver buffer */
89 int rcount; /* received chars counter */
90 unsigned char xbuff[SLC_MTU]; /* transmitter buffer */
91 unsigned char *xhead; /* pointer to next XMIT byte */
92 int xleft; /* bytes left in XMIT queue */
93
94 unsigned long flags; /* Flag values/ mode etc */
95#define SLF_INUSE 0 /* Channel in use */
96#define SLF_ERROR 1 /* Parity, etc. error */
97
98 unsigned char leased;
99 dev_t line;
100 pid_t pid;
101};
102
103static struct net_device **slcan_devs;
104
105 /************************************************************************
106 * SLCAN ENCAPSULATION FORMAT *
107 ************************************************************************/
108
109/*
110 * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
111 * frame format) a data length code (can_dlc) which can be from 0 to 8
112 * and up to <can_dlc> data bytes as payload.
113 * Additionally a CAN frame may become a remote transmission frame if the
114 * RTR-bit is set. This causes another ECU to send a CAN frame with the
115 * given can_id.
116 *
117 * The SLCAN ASCII representation of these different frame types is:
118 * <type> <id> <dlc> <data>*
119 *
120 * Extended frames (29 bit) are defined by capital characters in the type.
121 * RTR frames are defined as 'r' types - normal frames have 't' type:
122 * t => 11 bit data frame
123 * r => 11 bit RTR frame
124 * T => 29 bit data frame
125 * R => 29 bit RTR frame
126 *
127 * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base64).
128 * The <dlc> is a one byte ASCII number ('0' - '8')
129 * The <data> section has at much ASCII Hex bytes as defined by the <dlc>
130 *
131 * Examples:
132 *
133 * t1230 : can_id 0x123, can_dlc 0, no data
134 * t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33
135 * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55
136 * r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request
137 *
138 */
139
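Because the framing described above is plain ASCII, the same strings can be produced from ordinary userspace code as well. The sketch below builds one SLCAN command from id/dlc/data, mirroring the t/T/r/R convention and the driver's slc_encaps() logic; it is an illustration of the format only, not part of the driver, and the DEMO_* flag values merely stand in for CAN_EFF_FLAG/CAN_RTR_FLAG.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DEMO_EFF_FLAG 0x80000000u	/* stands in for CAN_EFF_FLAG */
#define DEMO_RTR_FLAG 0x40000000u	/* stands in for CAN_RTR_FLAG */

/* Build one SLCAN command ("t...\r", "T...\r", "r...\r" or "R...\r") in buf.
 * buf must hold at least 32 bytes, roughly the SLC_MTU used by the driver. */
static size_t demo_slcan_encode(char *buf, uint32_t can_id,
				uint8_t dlc, const uint8_t *data)
{
	char cmd = (can_id & DEMO_RTR_FLAG) ? 'R' : 'T';
	size_t idx;

	if (can_id & DEMO_EFF_FLAG)
		sprintf(buf, "%c%08X%d", cmd, can_id & 0x1FFFFFFFu, dlc);
	else
		sprintf(buf, "%c%03X%d", cmd | 0x20, can_id & 0x7FFu, dlc);

	idx = strlen(buf);
	for (uint8_t i = 0; i < dlc; i++)
		sprintf(&buf[idx + 2 * i], "%02X", data[i]);

	strcat(buf, "\r");	/* CR terminates the command, as slcan_unesc() expects */
	return strlen(buf);	/* e.g. id 0x456, dlc 3, data 11 22 33 -> "t4563112233\r" */
}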
140 /************************************************************************
141 * STANDARD SLCAN DECAPSULATION *
142 ************************************************************************/
143
144static int asc2nibble(char c)
145{
146
147 if ((c >= '0') && (c <= '9'))
148 return c - '0';
149
150 if ((c >= 'A') && (c <= 'F'))
151 return c - 'A' + 10;
152
153 if ((c >= 'a') && (c <= 'f'))
154 return c - 'a' + 10;
155
156 return 16; /* error */
157}
158
159/* Send one completely decapsulated can_frame to the network layer */
160static void slc_bump(struct slcan *sl)
161{
162 struct sk_buff *skb;
163 struct can_frame cf;
164 int i, dlc_pos, tmp;
165 unsigned long ultmp;
166 char cmd = sl->rbuff[0];
167
168 if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R'))
169 return;
170
171 if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */
172 dlc_pos = 4; /* dlc position tiiid */
173 else
174 dlc_pos = 9; /* dlc position Tiiiiiiiid */
175
176 if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
177 return;
178
179 cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */
180
181 sl->rbuff[dlc_pos] = 0; /* terminate can_id string */
182
183 if (strict_strtoul(sl->rbuff+1, 16, &ultmp))
184 return;
185
186 cf.can_id = ultmp;
187
188 if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
189 cf.can_id |= CAN_EFF_FLAG;
190
191 if ((cmd | 0x20) == 'r') /* RTR frame */
192 cf.can_id |= CAN_RTR_FLAG;
193
194 *(u64 *) (&cf.data) = 0; /* clear payload */
195
196 for (i = 0, dlc_pos++; i < cf.can_dlc; i++) {
197
198 tmp = asc2nibble(sl->rbuff[dlc_pos++]);
199 if (tmp > 0x0F)
200 return;
201 cf.data[i] = (tmp << 4);
202 tmp = asc2nibble(sl->rbuff[dlc_pos++]);
203 if (tmp > 0x0F)
204 return;
205 cf.data[i] |= tmp;
206 }
207
208
209 skb = dev_alloc_skb(sizeof(struct can_frame));
210 if (!skb)
211 return;
212
213 skb->dev = sl->dev;
214 skb->protocol = htons(ETH_P_CAN);
215 skb->pkt_type = PACKET_BROADCAST;
216 skb->ip_summed = CHECKSUM_UNNECESSARY;
217 memcpy(skb_put(skb, sizeof(struct can_frame)),
218 &cf, sizeof(struct can_frame));
219 netif_rx(skb);
220
221 sl->dev->stats.rx_packets++;
222 sl->dev->stats.rx_bytes += cf.can_dlc;
223}
224
225/* parse tty input stream */
226static void slcan_unesc(struct slcan *sl, unsigned char s)
227{
228
229 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
230 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
231 (sl->rcount > 4)) {
232 slc_bump(sl);
233 }
234 sl->rcount = 0;
235 } else {
236 if (!test_bit(SLF_ERROR, &sl->flags)) {
237 if (sl->rcount < SLC_MTU) {
238 sl->rbuff[sl->rcount++] = s;
239 return;
240 } else {
241 sl->dev->stats.rx_over_errors++;
242 set_bit(SLF_ERROR, &sl->flags);
243 }
244 }
245 }
246}
247
248 /************************************************************************
249 * STANDARD SLCAN ENCAPSULATION *
250 ************************************************************************/
251
252/* Encapsulate one can_frame and stuff into a TTY queue. */
253static void slc_encaps(struct slcan *sl, struct can_frame *cf)
254{
255 int actual, idx, i;
256 char cmd;
257
258 if (cf->can_id & CAN_RTR_FLAG)
259 cmd = 'R'; /* becomes 'r' in standard frame format */
260 else
261 cmd = 'T'; /* becomes 't' in standard frame format */
262
263 if (cf->can_id & CAN_EFF_FLAG)
264 sprintf(sl->xbuff, "%c%08X%d", cmd,
265 cf->can_id & CAN_EFF_MASK, cf->can_dlc);
266 else
267 sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20,
268 cf->can_id & CAN_SFF_MASK, cf->can_dlc);
269
270 idx = strlen(sl->xbuff);
271
272 for (i = 0; i < cf->can_dlc; i++)
273 sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]);
274
275 strcat(sl->xbuff, "\r"); /* add terminating character */
276
277 /* Order of next two lines is *very* important.
278 * When we are sending a little amount of data,
279 * the transfer may be completed inside the ops->write()
280 * routine, because it's running with interrupts enabled.
281 * In this case we *never* got WRITE_WAKEUP event,
282 * if we did not request it before write operation.
283 * 14 Oct 1994 Dmitry Gorodchanin.
284 */
285 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
286 actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff));
287 sl->xleft = strlen(sl->xbuff) - actual;
288 sl->xhead = sl->xbuff + actual;
289 sl->dev->stats.tx_bytes += cf->can_dlc;
290}
291
292/*
293 * Called by the driver when there's room for more data. If we have
294 * more packets to send, we send them here.
295 */
296static void slcan_write_wakeup(struct tty_struct *tty)
297{
298 int actual;
299 struct slcan *sl = (struct slcan *) tty->disc_data;
300
301 /* First make sure we're connected. */
302 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
303 return;
304
305 if (sl->xleft <= 0) {
306 /* Now serial buffer is almost free & we can start
307 * transmission of another packet */
308 sl->dev->stats.tx_packets++;
309 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
310 netif_wake_queue(sl->dev);
311 return;
312 }
313
314 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
315 sl->xleft -= actual;
316 sl->xhead += actual;
317}
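slc_encaps() and slcan_write_wakeup() together implement the partial-write pattern inherited from slip.c: request the write-wakeup callback before writing, remember how much of the frame is still pending in xhead/xleft, and drain the remainder from the wakeup. A condensed sketch of just that bookkeeping is below, with a made-up write_fn standing in for tty->ops->write(); the wakeup-flag handling and locking of the real driver are deliberately left out.

#include <stddef.h>

struct demo_chan {
	const char *xhead;	/* next byte still to be sent */
	size_t xleft;		/* bytes left of the current frame */
};

/* Like tty->ops->write(), write_fn may accept fewer bytes than requested. */
typedef size_t (*write_fn)(const char *buf, size_t len);

static void demo_start_tx(struct demo_chan *c, const char *buf, size_t len,
			  write_fn wr)
{
	size_t done = wr(buf, len);	/* may be a short write */

	c->xhead = buf + done;
	c->xleft = len - done;		/* drained later from the wakeup path */
}

static void demo_write_wakeup(struct demo_chan *c, write_fn wr)
{
	size_t done;

	if (c->xleft == 0)
		return;			/* whole frame already on the wire */

	done = wr(c->xhead, c->xleft);
	c->xhead += done;
	c->xleft -= done;
}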
318
319/* Send a can_frame to a TTY queue. */
320static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
321{
322 struct slcan *sl = netdev_priv(dev);
323
324 if (skb->len != sizeof(struct can_frame))
325 goto out;
326
327 spin_lock(&sl->lock);
328 if (!netif_running(dev)) {
329 spin_unlock(&sl->lock);
330 printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
331 goto out;
332 }
333 if (sl->tty == NULL) {
334 spin_unlock(&sl->lock);
335 goto out;
336 }
337
338 netif_stop_queue(sl->dev);
339 slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
340 spin_unlock(&sl->lock);
341
342out:
343 kfree_skb(skb);
344 return NETDEV_TX_OK;
345}
346
347
348/******************************************
349 * Routines looking at netdevice side.
350 ******************************************/
351
352/* Netdevice UP -> DOWN routine */
353static int slc_close(struct net_device *dev)
354{
355 struct slcan *sl = netdev_priv(dev);
356
357 spin_lock_bh(&sl->lock);
358 if (sl->tty) {
359 /* TTY discipline is running. */
360 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
361 }
362 netif_stop_queue(dev);
363 sl->rcount = 0;
364 sl->xleft = 0;
365 spin_unlock_bh(&sl->lock);
366
367 return 0;
368}
369
370/* Netdevice DOWN -> UP routine */
371static int slc_open(struct net_device *dev)
372{
373 struct slcan *sl = netdev_priv(dev);
374
375 if (sl->tty == NULL)
376 return -ENODEV;
377
378 sl->flags &= (1 << SLF_INUSE);
379 netif_start_queue(dev);
380 return 0;
381}
382
383/* Hook the destructor so we can free slcan devs at the right point in time */
384static void slc_free_netdev(struct net_device *dev)
385{
386 int i = dev->base_addr;
387 free_netdev(dev);
388 slcan_devs[i] = NULL;
389}
390
391static const struct net_device_ops slc_netdev_ops = {
392 .ndo_open = slc_open,
393 .ndo_stop = slc_close,
394 .ndo_start_xmit = slc_xmit,
395};
396
397static void slc_setup(struct net_device *dev)
398{
399 dev->netdev_ops = &slc_netdev_ops;
400 dev->destructor = slc_free_netdev;
401
402 dev->hard_header_len = 0;
403 dev->addr_len = 0;
404 dev->tx_queue_len = 10;
405
406 dev->mtu = sizeof(struct can_frame);
407 dev->type = ARPHRD_CAN;
408
409 /* New-style flags. */
410 dev->flags = IFF_NOARP;
411 dev->features = NETIF_F_NO_CSUM;
412}
413
414/******************************************
415 Routines looking at TTY side.
416 ******************************************/
417
418/*
419 * Handle the 'receiver data ready' interrupt.
420 * This function is called by the 'tty_io' module in the kernel when
421 * a block of SLCAN data has been received, which can now be decapsulated
422 * and sent on to the CAN networking layer for further processing. This will not
423 * be re-entered while running but other ldisc functions may be called
424 * in parallel
425 */
426
427static void slcan_receive_buf(struct tty_struct *tty,
428 const unsigned char *cp, char *fp, int count)
429{
430 struct slcan *sl = (struct slcan *) tty->disc_data;
431
432 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
433 return;
434
435 /* Read the characters out of the buffer */
436 while (count--) {
437 if (fp && *fp++) {
438 if (!test_and_set_bit(SLF_ERROR, &sl->flags))
439 sl->dev->stats.rx_errors++;
440 cp++;
441 continue;
442 }
443 slcan_unesc(sl, *cp++);
444 }
445}
446
447/************************************
448 * slcan_open helper routines.
449 ************************************/
450
451/* Collect hung-up channels */
452static void slc_sync(void)
453{
454 int i;
455 struct net_device *dev;
456 struct slcan *sl;
457
458 for (i = 0; i < maxdev; i++) {
459 dev = slcan_devs[i];
460 if (dev == NULL)
461 break;
462
463 sl = netdev_priv(dev);
464 if (sl->tty || sl->leased)
465 continue;
466 if (dev->flags & IFF_UP)
467 dev_close(dev);
468 }
469}
470
471/* Find a free SLCAN channel, and link in this `tty' line. */
472static struct slcan *slc_alloc(dev_t line)
473{
474 int i;
475 struct net_device *dev = NULL;
476 struct slcan *sl;
477
478 if (slcan_devs == NULL)
479 return NULL; /* Master array missing ! */
480
481 for (i = 0; i < maxdev; i++) {
482 dev = slcan_devs[i];
483 if (dev == NULL)
484 break;
485
486 }
487
488 /* Sorry, too many, all slots in use */
489 if (i >= maxdev)
490 return NULL;
491
492 if (dev) {
493 sl = netdev_priv(dev);
494 if (test_bit(SLF_INUSE, &sl->flags)) {
495 unregister_netdevice(dev);
496 dev = NULL;
497 slcan_devs[i] = NULL;
498 }
499 }
500
501 if (!dev) {
502 char name[IFNAMSIZ];
503 sprintf(name, "slcan%d", i);
504
505 dev = alloc_netdev(sizeof(*sl), name, slc_setup);
506 if (!dev)
507 return NULL;
508 dev->base_addr = i;
509 }
510
511 sl = netdev_priv(dev);
512
513 /* Initialize channel control data */
514 sl->magic = SLCAN_MAGIC;
515 sl->dev = dev;
516 spin_lock_init(&sl->lock);
517 slcan_devs[i] = dev;
518
519 return sl;
520}
521
522/*
523 * Open the high-level part of the SLCAN channel.
524 * This function is called by the TTY module when the
525 * SLCAN line discipline is called for. Because we are
526 * sure the tty line exists, we only have to link it to
527 * a free SLCAN channel...
528 *
529 * Called in process context serialized from other ldisc calls.
530 */
531
532static int slcan_open(struct tty_struct *tty)
533{
534 struct slcan *sl;
535 int err;
536
537 if (!capable(CAP_NET_ADMIN))
538 return -EPERM;
539
540 if (tty->ops->write == NULL)
541 return -EOPNOTSUPP;
542
543 /* RTnetlink lock is misused here to serialize concurrent
544 opens of slcan channels. There are better ways, but it is
545 the simplest one.
546 */
547 rtnl_lock();
548
549 /* Collect hung-up channels. */
550 slc_sync();
551
552 sl = tty->disc_data;
553
554 err = -EEXIST;
555 /* First make sure we're not already connected. */
556 if (sl && sl->magic == SLCAN_MAGIC)
557 goto err_exit;
558
559 /* OK. Find a free SLCAN channel to use. */
560 err = -ENFILE;
561 sl = slc_alloc(tty_devnum(tty));
562 if (sl == NULL)
563 goto err_exit;
564
565 sl->tty = tty;
566 tty->disc_data = sl;
567 sl->line = tty_devnum(tty);
568 sl->pid = current->pid;
569
570 if (!test_bit(SLF_INUSE, &sl->flags)) {
571 /* Perform the low-level SLCAN initialization. */
572 sl->rcount = 0;
573 sl->xleft = 0;
574
575 set_bit(SLF_INUSE, &sl->flags);
576
577 err = register_netdevice(sl->dev);
578 if (err)
579 goto err_free_chan;
580 }
581
582 /* Done. We have linked the TTY line to a channel. */
583 rtnl_unlock();
584 tty->receive_room = 65536; /* We don't flow control */
585 return sl->dev->base_addr;
586
587err_free_chan:
588 sl->tty = NULL;
589 tty->disc_data = NULL;
590 clear_bit(SLF_INUSE, &sl->flags);
591
592err_exit:
593 rtnl_unlock();
594
595 /* Count references from TTY module */
596 return err;
597}
598
599/*
600 * Close down a SLCAN channel.
601 * This means flushing out any pending queues, and then returning. This
602 * call is serialized against other ldisc functions.
603 *
604 * We also use this method for a hangup event.
605 */
606
607static void slcan_close(struct tty_struct *tty)
608{
609 struct slcan *sl = (struct slcan *) tty->disc_data;
610
611 /* First make sure we're connected. */
612 if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
613 return;
614
615 tty->disc_data = NULL;
616 sl->tty = NULL;
617 if (!sl->leased)
618 sl->line = 0;
619
620 /* Flush network side */
621 unregister_netdev(sl->dev);
622 /* This will complete via slc_free_netdev */
623}
624
625static int slcan_hangup(struct tty_struct *tty)
626{
627 slcan_close(tty);
628 return 0;
629}
630
631/* Perform I/O control on an active SLCAN channel. */
632static int slcan_ioctl(struct tty_struct *tty, struct file *file,
633 unsigned int cmd, unsigned long arg)
634{
635 struct slcan *sl = (struct slcan *) tty->disc_data;
636 unsigned int tmp;
637
638 /* First make sure we're connected. */
639 if (!sl || sl->magic != SLCAN_MAGIC)
640 return -EINVAL;
641
642 switch (cmd) {
643 case SIOCGIFNAME:
644 tmp = strlen(sl->dev->name) + 1;
645 if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
646 return -EFAULT;
647 return 0;
648
649 case SIOCSIFHWADDR:
650 return -EINVAL;
651
652 default:
653 return tty_mode_ioctl(tty, file, cmd, arg);
654 }
655}
656
657static struct tty_ldisc_ops slc_ldisc = {
658 .owner = THIS_MODULE,
659 .magic = TTY_LDISC_MAGIC,
660 .name = "slcan",
661 .open = slcan_open,
662 .close = slcan_close,
663 .hangup = slcan_hangup,
664 .ioctl = slcan_ioctl,
665 .receive_buf = slcan_receive_buf,
666 .write_wakeup = slcan_write_wakeup,
667};
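
For orientation, a minimal userspace sketch (not from this patch) of how the line discipline declared above is normally attached to a serial port; the N_SLCAN value and the device path are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define N_SLCAN 17	/* assumed ldisc number registered for slcan */

/* Switch a serial port to the slcan line discipline; this is what makes
 * slcan_open() above run and create a slcanN network interface.
 */
int main(void)
{
	int ldisc = N_SLCAN;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

	if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
		perror("slcan attach");
		return 1;
	}
	/* The interface exists while this descriptor stays open; detaching
	 * the ldisc or hanging up the tty reaches slcan_close() above. */
	pause();
	return 0;
}
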
668
669static int __init slcan_init(void)
670{
671 int status;
672
673 if (maxdev < 4)
674 maxdev = 4; /* Sanity */
675
676 printk(banner);
677 printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev);
678
679 slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
680 if (!slcan_devs) {
681 printk(KERN_ERR "slcan: can't allocate slcan device array!\n");
682 return -ENOMEM;
683 }
684
685 /* Fill in our line protocol discipline, and register it */
686 status = tty_register_ldisc(N_SLCAN, &slc_ldisc);
687 if (status) {
688 printk(KERN_ERR "slcan: can't register line discipline\n");
689 kfree(slcan_devs);
690 }
691 return status;
692}
693
694static void __exit slcan_exit(void)
695{
696 int i;
697 struct net_device *dev;
698 struct slcan *sl;
699 unsigned long timeout = jiffies + HZ;
700 int busy = 0;
701
702 if (slcan_devs == NULL)
703 return;
704
705 /* First of all: check for active disciplines and hang them up.
706 */
707 do {
708 if (busy)
709 msleep_interruptible(100);
710
711 busy = 0;
712 for (i = 0; i < maxdev; i++) {
713 dev = slcan_devs[i];
714 if (!dev)
715 continue;
716 sl = netdev_priv(dev);
717 spin_lock_bh(&sl->lock);
718 if (sl->tty) {
719 busy++;
720 tty_hangup(sl->tty);
721 }
722 spin_unlock_bh(&sl->lock);
723 }
724 } while (busy && time_before(jiffies, timeout));
725
726 /* FIXME: hangup is async so we should wait when doing this second
727 phase */
728
729 for (i = 0; i < maxdev; i++) {
730 dev = slcan_devs[i];
731 if (!dev)
732 continue;
733 slcan_devs[i] = NULL;
734
735 sl = netdev_priv(dev);
736 if (sl->tty) {
737 printk(KERN_ERR "%s: tty discipline still running\n",
738 dev->name);
739 /* Intentionally leak the control block. */
740 dev->destructor = NULL;
741 }
742
743 unregister_netdev(dev);
744 }
745
746 kfree(slcan_devs);
747 slcan_devs = NULL;
748
749 i = tty_unregister_ldisc(N_SLCAN);
750 if (i)
751 printk(KERN_ERR "slcan: can't unregister ldisc (err %d)\n", i);
752}
753
754module_init(slcan_init);
755module_exit(slcan_exit);
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 92bac19ad60..594ca9c2c10 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1695,7 +1695,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1695 *work = num; 1695 *work = num;
1696 return -EINVAL; 1696 return -EINVAL;
1697 } 1697 }
1698 *work = 2 + req2->num_additional_wqes;; 1698 *work = 2 + req2->num_additional_wqes;
1699 1699
1700 l5_cid = req1->iscsi_conn_id; 1700 l5_cid = req1->iscsi_conn_id;
1701 if (l5_cid >= MAX_ISCSI_TBL_SZ) 1701 if (l5_cid >= MAX_ISCSI_TBL_SZ)
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 81475cc80e1..80c2feeefec 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -59,7 +59,6 @@ static struct sockaddr default_mac = {
59 59
60/* Information that need to be kept for each board. */ 60/* Information that need to be kept for each board. */
61struct net_local { 61struct net_local {
62 struct net_device_stats stats;
63 struct mii_if_info mii_if; 62 struct mii_if_info mii_if;
64 63
65 /* Tx control lock. This protects the transmit buffer ring 64 /* Tx control lock. This protects the transmit buffer ring
@@ -1059,7 +1058,7 @@ e100_tx_timeout(struct net_device *dev)
1059 1058
1060 /* remember we got an error */ 1059 /* remember we got an error */
1061 1060
1062 np->stats.tx_errors++; 1061 dev->stats.tx_errors++;
1063 1062
1064 /* reset the TX DMA in case it has hung on something */ 1063 /* reset the TX DMA in case it has hung on something */
1065 1064
@@ -1157,7 +1156,7 @@ e100rxtx_interrupt(int irq, void *dev_id)
1157 * allocate a new buffer to put a packet in. 1156 * allocate a new buffer to put a packet in.
1158 */ 1157 */
1159 e100_rx(dev); 1158 e100_rx(dev);
1160 np->stats.rx_packets++; 1159 dev->stats.rx_packets++;
1161 /* restart/continue on the channel, for safety */ 1160 /* restart/continue on the channel, for safety */
1162 *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart); 1161 *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
1163 /* clear dma channel 1 eop/descr irq bits */ 1162 /* clear dma channel 1 eop/descr irq bits */
@@ -1173,8 +1172,8 @@ e100rxtx_interrupt(int irq, void *dev_id)
1173 /* Report any packets that have been sent */ 1172 /* Report any packets that have been sent */
1174 while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST && 1173 while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
1175 (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) { 1174 (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
1176 np->stats.tx_bytes += myFirstTxDesc->skb->len; 1175 dev->stats.tx_bytes += myFirstTxDesc->skb->len;
1177 np->stats.tx_packets++; 1176 dev->stats.tx_packets++;
1178 1177
1179 /* dma is ready with the transmission of the data in tx_skb, so now 1178 /* dma is ready with the transmission of the data in tx_skb, so now
1180 we can release the skb memory */ 1179 we can release the skb memory */
@@ -1197,7 +1196,6 @@ static irqreturn_t
1197e100nw_interrupt(int irq, void *dev_id) 1196e100nw_interrupt(int irq, void *dev_id)
1198{ 1197{
1199 struct net_device *dev = (struct net_device *)dev_id; 1198 struct net_device *dev = (struct net_device *)dev_id;
1200 struct net_local *np = netdev_priv(dev);
1201 unsigned long irqbits = *R_IRQ_MASK0_RD; 1199 unsigned long irqbits = *R_IRQ_MASK0_RD;
1202 1200
1203 /* check for underrun irq */ 1201 /* check for underrun irq */
@@ -1205,13 +1203,13 @@ e100nw_interrupt(int irq, void *dev_id)
1205 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); 1203 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
1206 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; 1204 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
1207 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); 1205 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
1208 np->stats.tx_errors++; 1206 dev->stats.tx_errors++;
1209 D(printk("ethernet receiver underrun!\n")); 1207 D(printk("ethernet receiver underrun!\n"));
1210 } 1208 }
1211 1209
1212 /* check for overrun irq */ 1210 /* check for overrun irq */
1213 if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) { 1211 if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
1214 update_rx_stats(&np->stats); /* this will ack the irq */ 1212 update_rx_stats(&dev->stats); /* this will ack the irq */
1215 D(printk("ethernet receiver overrun!\n")); 1213 D(printk("ethernet receiver overrun!\n"));
1216 } 1214 }
1217 /* check for excessive collision irq */ 1215 /* check for excessive collision irq */
@@ -1219,7 +1217,7 @@ e100nw_interrupt(int irq, void *dev_id)
1219 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); 1217 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
1220 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; 1218 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
1221 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); 1219 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
1222 np->stats.tx_errors++; 1220 dev->stats.tx_errors++;
1223 D(printk("ethernet excessive collisions!\n")); 1221 D(printk("ethernet excessive collisions!\n"));
1224 } 1222 }
1225 return IRQ_HANDLED; 1223 return IRQ_HANDLED;
@@ -1250,7 +1248,7 @@ e100_rx(struct net_device *dev)
1250 spin_unlock(&np->led_lock); 1248 spin_unlock(&np->led_lock);
1251 1249
1252 length = myNextRxDesc->descr.hw_len - 4; 1250 length = myNextRxDesc->descr.hw_len - 4;
1253 np->stats.rx_bytes += length; 1251 dev->stats.rx_bytes += length;
1254 1252
1255#ifdef ETHDEBUG 1253#ifdef ETHDEBUG
1256 printk("Got a packet of length %d:\n", length); 1254 printk("Got a packet of length %d:\n", length);
@@ -1268,7 +1266,7 @@ e100_rx(struct net_device *dev)
1268 /* Small packet, copy data */ 1266 /* Small packet, copy data */
1269 skb = dev_alloc_skb(length - ETHER_HEAD_LEN); 1267 skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
1270 if (!skb) { 1268 if (!skb) {
1271 np->stats.rx_errors++; 1269 dev->stats.rx_errors++;
1272 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 1270 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1273 goto update_nextrxdesc; 1271 goto update_nextrxdesc;
1274 } 1272 }
@@ -1294,7 +1292,7 @@ e100_rx(struct net_device *dev)
1294 int align; 1292 int align;
1295 struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); 1293 struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
1296 if (!new_skb) { 1294 if (!new_skb) {
1297 np->stats.rx_errors++; 1295 dev->stats.rx_errors++;
1298 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 1296 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1299 goto update_nextrxdesc; 1297 goto update_nextrxdesc;
1300 } 1298 }
@@ -1333,8 +1331,6 @@ e100_rx(struct net_device *dev)
1333static int 1331static int
1334e100_close(struct net_device *dev) 1332e100_close(struct net_device *dev)
1335{ 1333{
1336 struct net_local *np = netdev_priv(dev);
1337
1338 printk(KERN_INFO "Closing %s.\n", dev->name); 1334 printk(KERN_INFO "Closing %s.\n", dev->name);
1339 1335
1340 netif_stop_queue(dev); 1336 netif_stop_queue(dev);
@@ -1366,8 +1362,8 @@ e100_close(struct net_device *dev)
1366 1362
1367 /* Update the statistics here. */ 1363 /* Update the statistics here. */
1368 1364
1369 update_rx_stats(&np->stats); 1365 update_rx_stats(&dev->stats);
1370 update_tx_stats(&np->stats); 1366 update_tx_stats(&dev->stats);
1371 1367
1372 /* Stop speed/duplex timers */ 1368 /* Stop speed/duplex timers */
1373 del_timer(&speed_timer); 1369 del_timer(&speed_timer);
@@ -1545,11 +1541,11 @@ e100_get_stats(struct net_device *dev)
1545 1541
1546 spin_lock_irqsave(&lp->lock, flags); 1542 spin_lock_irqsave(&lp->lock, flags);
1547 1543
1548 update_rx_stats(&lp->stats); 1544 update_rx_stats(&dev->stats);
1549 update_tx_stats(&lp->stats); 1545 update_tx_stats(&dev->stats);
1550 1546
1551 spin_unlock_irqrestore(&lp->lock, flags); 1547 spin_unlock_irqrestore(&lp->lock, flags);
1552 return &lp->stats; 1548 return &dev->stats;
1553} 1549}
1554 1550
1555/* 1551/*
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 046d846c652..386461750d0 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -3006,12 +3006,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3006 pci_channel_state_t state) 3006 pci_channel_state_t state)
3007{ 3007{
3008 struct adapter *adapter = pci_get_drvdata(pdev); 3008 struct adapter *adapter = pci_get_drvdata(pdev);
3009 int ret;
3010 3009
3011 if (state == pci_channel_io_perm_failure) 3010 if (state == pci_channel_io_perm_failure)
3012 return PCI_ERS_RESULT_DISCONNECT; 3011 return PCI_ERS_RESULT_DISCONNECT;
3013 3012
3014 ret = t3_adapter_error(adapter, 0, 0); 3013 t3_adapter_error(adapter, 0, 0);
3015 3014
3016 /* Request a slot reset. */ 3015 /* Request a slot reset. */
3017 return PCI_ERS_RESULT_NEED_RESET; 3016 return PCI_ERS_RESULT_NEED_RESET;
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index bcf07532953..ef02aa68c92 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1164,12 +1164,10 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1164 */ 1164 */
1165void *cxgb_alloc_mem(unsigned long size) 1165void *cxgb_alloc_mem(unsigned long size)
1166{ 1166{
1167 void *p = kmalloc(size, GFP_KERNEL); 1167 void *p = kzalloc(size, GFP_KERNEL);
1168 1168
1169 if (!p) 1169 if (!p)
1170 p = vmalloc(size); 1170 p = vzalloc(size);
1171 if (p)
1172 memset(p, 0, size);
1173 return p; 1171 return p;
1174} 1172}
1175 1173
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index f50bc98310f..848f89d19fb 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -868,12 +868,10 @@ out: release_firmware(fw);
868 */ 868 */
869void *t4_alloc_mem(size_t size) 869void *t4_alloc_mem(size_t size)
870{ 870{
871 void *p = kmalloc(size, GFP_KERNEL); 871 void *p = kzalloc(size, GFP_KERNEL);
872 872
873 if (!p) 873 if (!p)
874 p = vmalloc(size); 874 p = vzalloc(size);
875 if (p)
876 memset(p, 0, size);
877 return p; 875 return p;
878} 876}
879 877
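
The cxgb3 and cxgb4 hunks above convert the same idiom; a minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Sketch of the allocation idiom being simplified above: try physically
 * contiguous memory first, fall back to vmalloc space for large requests,
 * and return zeroed memory either way, so the explicit memset() goes away.
 * The helper name is illustrative, not a kernel API.
 */
static void *zalloc_with_fallback(unsigned long size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

The matching free path in these drivers typically checks is_vmalloc_addr() to pick vfree() or kfree().
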
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 8ea01962e04..4766b4116b4 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -60,7 +60,7 @@ enum {
60 * MSI-X interrupt index usage. 60 * MSI-X interrupt index usage.
61 */ 61 */
62 MSIX_FW = 0, /* MSI-X index for firmware Q */ 62 MSIX_FW = 0, /* MSI-X index for firmware Q */
63 MSIX_NIQFLINT = 1, /* MSI-X index base for Ingress Qs */ 63 MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */
64 MSIX_EXTRAS = 1, 64 MSIX_EXTRAS = 1,
65 MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS, 65 MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
66 66
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index d887a76cd39..f54af48edb9 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -280,9 +280,7 @@ static void name_msix_vecs(struct adapter *adapter)
280 const struct port_info *pi = netdev_priv(dev); 280 const struct port_info *pi = netdev_priv(dev);
281 int qs, msi; 281 int qs, msi;
282 282
283 for (qs = 0, msi = MSIX_NIQFLINT; 283 for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
284 qs < pi->nqsets;
285 qs++, msi++) {
286 snprintf(adapter->msix_info[msi].desc, namelen, 284 snprintf(adapter->msix_info[msi].desc, namelen,
287 "%s-%d", dev->name, qs); 285 "%s-%d", dev->name, qs);
288 adapter->msix_info[msi].desc[namelen] = 0; 286 adapter->msix_info[msi].desc[namelen] = 0;
@@ -309,7 +307,7 @@ static int request_msix_queue_irqs(struct adapter *adapter)
309 /* 307 /*
310 * Ethernet queues. 308 * Ethernet queues.
311 */ 309 */
312 msi = MSIX_NIQFLINT; 310 msi = MSIX_IQFLINT;
313 for_each_ethrxq(s, rxq) { 311 for_each_ethrxq(s, rxq) {
314 err = request_irq(adapter->msix_info[msi].vec, 312 err = request_irq(adapter->msix_info[msi].vec,
315 t4vf_sge_intr_msix, 0, 313 t4vf_sge_intr_msix, 0,
@@ -337,7 +335,7 @@ static void free_msix_queue_irqs(struct adapter *adapter)
337 int rxq, msi; 335 int rxq, msi;
338 336
339 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); 337 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
340 msi = MSIX_NIQFLINT; 338 msi = MSIX_IQFLINT;
341 for_each_ethrxq(s, rxq) 339 for_each_ethrxq(s, rxq)
342 free_irq(adapter->msix_info[msi++].vec, 340 free_irq(adapter->msix_info[msi++].vec,
343 &s->ethrxq[rxq].rspq); 341 &s->ethrxq[rxq].rspq);
@@ -527,7 +525,7 @@ static int setup_sge_queues(struct adapter *adapter)
527 * brought up at which point lots of things get nailed down 525 * brought up at which point lots of things get nailed down
528 * permanently ... 526 * permanently ...
529 */ 527 */
530 msix = MSIX_NIQFLINT; 528 msix = MSIX_IQFLINT;
531 for_each_port(adapter, pidx) { 529 for_each_port(adapter, pidx) {
532 struct net_device *dev = adapter->port[pidx]; 530 struct net_device *dev = adapter->port[pidx];
533 struct port_info *pi = netdev_priv(dev); 531 struct port_info *pi = netdev_priv(dev);
@@ -1365,6 +1363,8 @@ struct queue_port_stats {
1365 u64 rx_csum; 1363 u64 rx_csum;
1366 u64 vlan_ex; 1364 u64 vlan_ex;
1367 u64 vlan_ins; 1365 u64 vlan_ins;
1366 u64 lro_pkts;
1367 u64 lro_merged;
1368}; 1368};
1369 1369
1370/* 1370/*
@@ -1402,6 +1402,8 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
1402 "RxCsumGood ", 1402 "RxCsumGood ",
1403 "VLANextractions ", 1403 "VLANextractions ",
1404 "VLANinsertions ", 1404 "VLANinsertions ",
1405 "GROPackets ",
1406 "GROMerged ",
1405}; 1407};
1406 1408
1407/* 1409/*
@@ -1451,6 +1453,8 @@ static void collect_sge_port_stats(const struct adapter *adapter,
1451 stats->rx_csum += rxq->stats.rx_cso; 1453 stats->rx_csum += rxq->stats.rx_cso;
1452 stats->vlan_ex += rxq->stats.vlan_ex; 1454 stats->vlan_ex += rxq->stats.vlan_ex;
1453 stats->vlan_ins += txq->vlan_ins; 1455 stats->vlan_ins += txq->vlan_ins;
1456 stats->lro_pkts += rxq->stats.lro_pkts;
1457 stats->lro_merged += rxq->stats.lro_merged;
1454 } 1458 }
1455} 1459}
1456 1460
@@ -1547,14 +1551,19 @@ static void cxgb4vf_get_wol(struct net_device *dev,
1547} 1551}
1548 1552
1549/* 1553/*
1554 * TCP Segmentation Offload flags which we support.
1555 */
1556#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1557
1558/*
1550 * Set TCP Segmentation Offloading feature capabilities. 1559 * Set TCP Segmentation Offloading feature capabilities.
1551 */ 1560 */
1552static int cxgb4vf_set_tso(struct net_device *dev, u32 tso) 1561static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
1553{ 1562{
1554 if (tso) 1563 if (tso)
1555 dev->features |= NETIF_F_TSO | NETIF_F_TSO6; 1564 dev->features |= TSO_FLAGS;
1556 else 1565 else
1557 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 1566 dev->features &= ~TSO_FLAGS;
1558 return 0; 1567 return 0;
1559} 1568}
1560 1569
@@ -2045,7 +2054,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
2045 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave 2054 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2046 * it to our caller to tear down the directory (debugfs_root). 2055 * it to our caller to tear down the directory (debugfs_root).
2047 */ 2056 */
2048static void __devexit cleanup_debugfs(struct adapter *adapter) 2057static void cleanup_debugfs(struct adapter *adapter)
2049{ 2058{
2050 BUG_ON(adapter->debugfs_root == NULL); 2059 BUG_ON(adapter->debugfs_root == NULL);
2051 2060
@@ -2063,7 +2072,7 @@ static void __devexit cleanup_debugfs(struct adapter *adapter)
2063 * adapter parameters we're going to be using and initialize basic adapter 2072 * adapter parameters we're going to be using and initialize basic adapter
2064 * hardware support. 2073 * hardware support.
2065 */ 2074 */
2066static int adap_init0(struct adapter *adapter) 2075static int __devinit adap_init0(struct adapter *adapter)
2067{ 2076{
2068 struct vf_resources *vfres = &adapter->params.vfres; 2077 struct vf_resources *vfres = &adapter->params.vfres;
2069 struct sge_params *sge_params = &adapter->params.sge; 2078 struct sge_params *sge_params = &adapter->params.sge;
@@ -2487,7 +2496,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2487 version_printed = 1; 2496 version_printed = 1;
2488 } 2497 }
2489 2498
2490
2491 /* 2499 /*
2492 * Initialize generic PCI device state. 2500 * Initialize generic PCI device state.
2493 */ 2501 */
@@ -2624,7 +2632,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2624 netif_carrier_off(netdev); 2632 netif_carrier_off(netdev);
2625 netdev->irq = pdev->irq; 2633 netdev->irq = pdev->irq;
2626 2634
2627 netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 2635 netdev->features = (NETIF_F_SG | TSO_FLAGS |
2628 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2636 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2629 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2637 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2630 NETIF_F_GRO); 2638 NETIF_F_GRO);
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index ecf0770bf0f..e0b3d1bc2fd 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -1568,6 +1568,9 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1568 } else 1568 } else
1569 skb_checksum_none_assert(skb); 1569 skb_checksum_none_assert(skb);
1570 1570
1571 /*
1572 * Deliver the packet to the stack.
1573 */
1571 if (unlikely(pkt->vlan_ex)) { 1574 if (unlikely(pkt->vlan_ex)) {
1572 struct vlan_group *grp = pi->vlan_grp; 1575 struct vlan_group *grp = pi->vlan_grp;
1573 1576
@@ -2143,7 +2146,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2143 2146
2144 /* 2147 /*
2145 * Calculate the size of the hardware free list ring plus 2148 * Calculate the size of the hardware free list ring plus
2146 * status page (which the SGE will place at the end of the 2149 * Status Page (which the SGE will place after the end of the
2147 * free list ring) in Egress Queue Units. 2150 * free list ring) in Egress Queue Units.
2148 */ 2151 */
2149 flsz = (fl->size / FL_PER_EQ_UNIT + 2152 flsz = (fl->size / FL_PER_EQ_UNIT +
@@ -2240,8 +2243,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2240 struct port_info *pi = netdev_priv(dev); 2243 struct port_info *pi = netdev_priv(dev);
2241 2244
2242 /* 2245 /*
2243 * Calculate the size of the hardware TX Queue (including the 2246 * Calculate the size of the hardware TX Queue (including the Status
2244 * status age on the end) in units of TX Descriptors. 2247 * Page on the end of the TX Queue) in units of TX Descriptors.
2245 */ 2248 */
2246 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); 2249 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2247 2250
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 19520afe1a1..35fc803a6a0 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -1300,7 +1300,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
1300 */ 1300 */
1301int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) 1301int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1302{ 1302{
1303 struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl; 1303 const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
1304 u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi)); 1304 u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
1305 1305
1306 switch (opcode) { 1306 switch (opcode) {
@@ -1308,7 +1308,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1308 /* 1308 /*
1309 * Link/module state change message. 1309 * Link/module state change message.
1310 */ 1310 */
1311 const struct fw_port_cmd *port_cmd = (void *)rpl; 1311 const struct fw_port_cmd *port_cmd =
1312 (const struct fw_port_cmd *)rpl;
1312 u32 word; 1313 u32 word;
1313 int action, port_id, link_ok, speed, fc, pidx; 1314 int action, port_id, link_ok, speed, fc, pidx;
1314 1315
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 9f6aeefa06b..2d4c4fc1d90 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1675,7 +1675,7 @@ dm9000_drv_remove(struct platform_device *pdev)
1675 platform_set_drvdata(pdev, NULL); 1675 platform_set_drvdata(pdev, NULL);
1676 1676
1677 unregister_netdev(ndev); 1677 unregister_netdev(ndev);
1678 dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev)); 1678 dm9000_release_board(pdev, netdev_priv(ndev));
1679 free_netdev(ndev); /* free device structure */ 1679 free_netdev(ndev); /* free device structure */
1680 1680
1681 dev_dbg(&pdev->dev, "released and freed device\n"); 1681 dev_dbg(&pdev->dev, "released and freed device\n");
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4d62f7bfa03..06c7d1c6751 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1429,13 +1429,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1429 int size; 1429 int size;
1430 1430
1431 size = sizeof(struct e1000_buffer) * txdr->count; 1431 size = sizeof(struct e1000_buffer) * txdr->count;
1432 txdr->buffer_info = vmalloc(size); 1432 txdr->buffer_info = vzalloc(size);
1433 if (!txdr->buffer_info) { 1433 if (!txdr->buffer_info) {
1434 e_err(probe, "Unable to allocate memory for the Tx descriptor " 1434 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1435 "ring\n"); 1435 "ring\n");
1436 return -ENOMEM; 1436 return -ENOMEM;
1437 } 1437 }
1438 memset(txdr->buffer_info, 0, size);
1439 1438
1440 /* round up to nearest 4K */ 1439 /* round up to nearest 4K */
1441 1440
@@ -1625,13 +1624,12 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1625 int size, desc_len; 1624 int size, desc_len;
1626 1625
1627 size = sizeof(struct e1000_buffer) * rxdr->count; 1626 size = sizeof(struct e1000_buffer) * rxdr->count;
1628 rxdr->buffer_info = vmalloc(size); 1627 rxdr->buffer_info = vzalloc(size);
1629 if (!rxdr->buffer_info) { 1628 if (!rxdr->buffer_info) {
1630 e_err(probe, "Unable to allocate memory for the Rx descriptor " 1629 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1631 "ring\n"); 1630 "ring\n");
1632 return -ENOMEM; 1631 return -ENOMEM;
1633 } 1632 }
1634 memset(rxdr->buffer_info, 0, size);
1635 1633
1636 desc_len = sizeof(struct e1000_rx_desc); 1634 desc_len = sizeof(struct e1000_rx_desc);
1637 1635
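
The same conversion appears here and again in the e1000e and ehea hunks below: a vmalloc() followed by a memset() to zero collapses into one vzalloc() call. A minimal sketch of the two equivalent forms, with hypothetical function names:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Both helpers return a zeroed, virtually contiguous buffer; vzalloc()
 * simply folds the zeroing in.  Names are illustrative only.
 */
static void *alloc_ring_old(size_t size)
{
	void *buf = vmalloc(size);

	if (buf)
		memset(buf, 0, size);
	return buf;
}

static void *alloc_ring_new(size_t size)
{
	return vzalloc(size);
}
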
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 7236f1a53ba..9333921010c 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -74,6 +74,9 @@ static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
74static s32 e1000_led_on_82574(struct e1000_hw *hw); 74static s32 e1000_led_on_82574(struct e1000_hw *hw);
75static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); 75static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
76static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); 76static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
77static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
78static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
79static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
77 80
78/** 81/**
79 * e1000_init_phy_params_82571 - Init PHY func ptrs. 82 * e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -107,6 +110,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
107 case e1000_82574: 110 case e1000_82574:
108 case e1000_82583: 111 case e1000_82583:
109 phy->type = e1000_phy_bm; 112 phy->type = e1000_phy_bm;
113 phy->ops.acquire = e1000_get_hw_semaphore_82574;
114 phy->ops.release = e1000_put_hw_semaphore_82574;
110 break; 115 break;
111 default: 116 default:
112 return -E1000_ERR_PHY; 117 return -E1000_ERR_PHY;
@@ -200,6 +205,17 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
200 break; 205 break;
201 } 206 }
202 207
208 /* Function Pointers */
209 switch (hw->mac.type) {
210 case e1000_82574:
211 case e1000_82583:
212 nvm->ops.acquire = e1000_get_hw_semaphore_82574;
213 nvm->ops.release = e1000_put_hw_semaphore_82574;
214 break;
215 default:
216 break;
217 }
218
203 return 0; 219 return 0;
204} 220}
205 221
@@ -542,6 +558,94 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
542 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); 558 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
543 ew32(SWSM, swsm); 559 ew32(SWSM, swsm);
544} 560}
561/**
562 * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
563 * @hw: pointer to the HW structure
564 *
565 * Acquire the HW semaphore during reset.
566 *
567 **/
568static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
569{
570 u32 extcnf_ctrl;
571 s32 ret_val = 0;
572 s32 i = 0;
573
574 extcnf_ctrl = er32(EXTCNF_CTRL);
575 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
576 do {
577 ew32(EXTCNF_CTRL, extcnf_ctrl);
578 extcnf_ctrl = er32(EXTCNF_CTRL);
579
580 if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
581 break;
582
583 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
584
585 msleep(2);
586 i++;
587 } while (i < MDIO_OWNERSHIP_TIMEOUT);
588
589 if (i == MDIO_OWNERSHIP_TIMEOUT) {
590 /* Release semaphores */
591 e1000_put_hw_semaphore_82573(hw);
592 e_dbg("Driver can't access the PHY\n");
593 ret_val = -E1000_ERR_PHY;
594 goto out;
595 }
596
597out:
598 return ret_val;
599}
600
601/**
602 * e1000_put_hw_semaphore_82573 - Release hardware semaphore
603 * @hw: pointer to the HW structure
604 *
605 * Release hardware semaphore used during reset.
606 *
607 **/
608static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
609{
610 u32 extcnf_ctrl;
611
612 extcnf_ctrl = er32(EXTCNF_CTRL);
613 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
614 ew32(EXTCNF_CTRL, extcnf_ctrl);
615}
616
617static DEFINE_MUTEX(swflag_mutex);
618
619/**
620 * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
621 * @hw: pointer to the HW structure
622 *
623 * Acquire the HW semaphore to access the PHY or NVM.
624 *
625 **/
626static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
627{
628 s32 ret_val;
629
630 mutex_lock(&swflag_mutex);
631 ret_val = e1000_get_hw_semaphore_82573(hw);
632 if (ret_val)
633 mutex_unlock(&swflag_mutex);
634 return ret_val;
635}
636
637/**
638 * e1000_put_hw_semaphore_82574 - Release hardware semaphore
639 * @hw: pointer to the HW structure
640 *
641 * Release hardware semaphore used to access the PHY or NVM
642 *
643 **/
644static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
645{
646 e1000_put_hw_semaphore_82573(hw);
647 mutex_unlock(&swflag_mutex);
648}
545 649
546/** 650/**
547 * e1000_acquire_nvm_82571 - Request for access to the EEPROM 651 * e1000_acquire_nvm_82571 - Request for access to the EEPROM
@@ -562,8 +666,6 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
562 666
563 switch (hw->mac.type) { 667 switch (hw->mac.type) {
564 case e1000_82573: 668 case e1000_82573:
565 case e1000_82574:
566 case e1000_82583:
567 break; 669 break;
568 default: 670 default:
569 ret_val = e1000e_acquire_nvm(hw); 671 ret_val = e1000e_acquire_nvm(hw);
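
A minimal sketch (not from this patch) of how the new 82574/82583 helpers are reached once installed as phy->ops.acquire/release and nvm->ops.acquire/release in the earlier hunks; the MDIC read and register offset are assumptions for illustration:

#include "e1000.h"	/* e1000e driver-internal header */

/* Generic e1000e paths bracket PHY/NVM access with the acquire/release
 * hooks; for 82574/82583 those now take the swflag mutex and then the
 * EXTCNF_CTRL MDIO ownership bit.
 */
static s32 read_phy_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);	/* e1000_get_hw_semaphore_82574 */
	if (ret_val)
		return ret_val;
	ret_val = e1000e_read_phy_reg_mdic(hw, offset, data);
	hw->phy.ops.release(hw);		/* e1000_put_hw_semaphore_82574 */
	return ret_val;
}
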
@@ -853,9 +955,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
853 **/ 955 **/
854static s32 e1000_reset_hw_82571(struct e1000_hw *hw) 956static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
855{ 957{
856 u32 ctrl, extcnf_ctrl, ctrl_ext, icr; 958 u32 ctrl, ctrl_ext, icr;
857 s32 ret_val; 959 s32 ret_val;
858 u16 i = 0;
859 960
860 /* 961 /*
861 * Prevent the PCI-E bus from sticking if there is no TLP connection 962 * Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -880,33 +981,33 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
880 */ 981 */
881 switch (hw->mac.type) { 982 switch (hw->mac.type) {
882 case e1000_82573: 983 case e1000_82573:
984 ret_val = e1000_get_hw_semaphore_82573(hw);
985 break;
883 case e1000_82574: 986 case e1000_82574:
884 case e1000_82583: 987 case e1000_82583:
885 extcnf_ctrl = er32(EXTCNF_CTRL); 988 ret_val = e1000_get_hw_semaphore_82574(hw);
886 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
887
888 do {
889 ew32(EXTCNF_CTRL, extcnf_ctrl);
890 extcnf_ctrl = er32(EXTCNF_CTRL);
891
892 if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
893 break;
894
895 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
896
897 msleep(2);
898 i++;
899 } while (i < MDIO_OWNERSHIP_TIMEOUT);
900 break; 989 break;
901 default: 990 default:
902 break; 991 break;
903 } 992 }
993 if (ret_val)
994 e_dbg("Cannot acquire MDIO ownership\n");
904 995
905 ctrl = er32(CTRL); 996 ctrl = er32(CTRL);
906 997
907 e_dbg("Issuing a global reset to MAC\n"); 998 e_dbg("Issuing a global reset to MAC\n");
908 ew32(CTRL, ctrl | E1000_CTRL_RST); 999 ew32(CTRL, ctrl | E1000_CTRL_RST);
909 1000
1001 /* Must release MDIO ownership and mutex after MAC reset. */
1002 switch (hw->mac.type) {
1003 case e1000_82574:
1004 case e1000_82583:
1005 e1000_put_hw_semaphore_82574(hw);
1006 break;
1007 default:
1008 break;
1009 }
1010
910 if (hw->nvm.type == e1000_nvm_flash_hw) { 1011 if (hw->nvm.type == e1000_nvm_flash_hw) {
911 udelay(10); 1012 udelay(10);
912 ctrl_ext = er32(CTRL_EXT); 1013 ctrl_ext = er32(CTRL_EXT);
@@ -1431,8 +1532,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1431 * auto-negotiation in the TXCW register and disable 1532 * auto-negotiation in the TXCW register and disable
1432 * forced link in the Device Control register in an 1533 * forced link in the Device Control register in an
1433 * attempt to auto-negotiate with our link partner. 1534 * attempt to auto-negotiate with our link partner.
1535 * If the partner code word is null, stop forcing
1536 * and restart auto negotiation.
1434 */ 1537 */
1435 if (rxcw & E1000_RXCW_C) { 1538 if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
1436 /* Enable autoneg, and unforce link up */ 1539 /* Enable autoneg, and unforce link up */
1437 ew32(TXCW, mac->txcw); 1540 ew32(TXCW, mac->txcw);
1438 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 1541 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index d3f7a9c3f97..016ea383145 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -516,6 +516,7 @@
516#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ 516#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
517 517
518/* Receive Configuration Word */ 518/* Receive Configuration Word */
519#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
519#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ 520#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
520#define E1000_RXCW_C 0x20000000 /* Receive config */ 521#define E1000_RXCW_C 0x20000000 /* Receive config */
521#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ 522#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c4ca1629f53..0adcb79e638 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2059,10 +2059,9 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
2059 int err = -ENOMEM, size; 2059 int err = -ENOMEM, size;
2060 2060
2061 size = sizeof(struct e1000_buffer) * tx_ring->count; 2061 size = sizeof(struct e1000_buffer) * tx_ring->count;
2062 tx_ring->buffer_info = vmalloc(size); 2062 tx_ring->buffer_info = vzalloc(size);
2063 if (!tx_ring->buffer_info) 2063 if (!tx_ring->buffer_info)
2064 goto err; 2064 goto err;
2065 memset(tx_ring->buffer_info, 0, size);
2066 2065
2067 /* round up to nearest 4K */ 2066 /* round up to nearest 4K */
2068 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 2067 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
@@ -2095,10 +2094,9 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
2095 int i, size, desc_len, err = -ENOMEM; 2094 int i, size, desc_len, err = -ENOMEM;
2096 2095
2097 size = sizeof(struct e1000_buffer) * rx_ring->count; 2096 size = sizeof(struct e1000_buffer) * rx_ring->count;
2098 rx_ring->buffer_info = vmalloc(size); 2097 rx_ring->buffer_info = vzalloc(size);
2099 if (!rx_ring->buffer_info) 2098 if (!rx_ring->buffer_info)
2100 goto err; 2099 goto err;
2101 memset(rx_ring->buffer_info, 0, size);
2102 2100
2103 for (i = 0; i < rx_ring->count; i++) { 2101 for (i = 0; i < rx_ring->count; i++) {
2104 buffer_info = &rx_ring->buffer_info[i]; 2102 buffer_info = &rx_ring->buffer_info[i];
@@ -4595,7 +4593,7 @@ dma_error:
4595 i += tx_ring->count; 4593 i += tx_ring->count;
4596 i--; 4594 i--;
4597 buffer_info = &tx_ring->buffer_info[i]; 4595 buffer_info = &tx_ring->buffer_info[i];
4598 e1000_put_txbuf(adapter, buffer_info);; 4596 e1000_put_txbuf(adapter, buffer_info);
4599 } 4597 }
4600 4598
4601 return 0; 4599 return 0;
@@ -5465,6 +5463,36 @@ static void e1000_shutdown(struct pci_dev *pdev)
5465} 5463}
5466 5464
5467#ifdef CONFIG_NET_POLL_CONTROLLER 5465#ifdef CONFIG_NET_POLL_CONTROLLER
5466
5467static irqreturn_t e1000_intr_msix(int irq, void *data)
5468{
5469 struct net_device *netdev = data;
5470 struct e1000_adapter *adapter = netdev_priv(netdev);
5471 int vector, msix_irq;
5472
5473 if (adapter->msix_entries) {
5474 vector = 0;
5475 msix_irq = adapter->msix_entries[vector].vector;
5476 disable_irq(msix_irq);
5477 e1000_intr_msix_rx(msix_irq, netdev);
5478 enable_irq(msix_irq);
5479
5480 vector++;
5481 msix_irq = adapter->msix_entries[vector].vector;
5482 disable_irq(msix_irq);
5483 e1000_intr_msix_tx(msix_irq, netdev);
5484 enable_irq(msix_irq);
5485
5486 vector++;
5487 msix_irq = adapter->msix_entries[vector].vector;
5488 disable_irq(msix_irq);
5489 e1000_msix_other(msix_irq, netdev);
5490 enable_irq(msix_irq);
5491 }
5492
5493 return IRQ_HANDLED;
5494}
5495
5468/* 5496/*
5469 * Polling 'interrupt' - used by things like netconsole to send skbs 5497 * Polling 'interrupt' - used by things like netconsole to send skbs
5470 * without having to re-enable interrupts. It's not called while 5498 * without having to re-enable interrupts. It's not called while
@@ -5474,10 +5502,21 @@ static void e1000_netpoll(struct net_device *netdev)
5474{ 5502{
5475 struct e1000_adapter *adapter = netdev_priv(netdev); 5503 struct e1000_adapter *adapter = netdev_priv(netdev);
5476 5504
5477 disable_irq(adapter->pdev->irq); 5505 switch (adapter->int_mode) {
5478 e1000_intr(adapter->pdev->irq, netdev); 5506 case E1000E_INT_MODE_MSIX:
5479 5507 e1000_intr_msix(adapter->pdev->irq, netdev);
5480 enable_irq(adapter->pdev->irq); 5508 break;
5509 case E1000E_INT_MODE_MSI:
5510 disable_irq(adapter->pdev->irq);
5511 e1000_intr_msi(adapter->pdev->irq, netdev);
5512 enable_irq(adapter->pdev->irq);
5513 break;
5514 default: /* E1000E_INT_MODE_LEGACY */
5515 disable_irq(adapter->pdev->irq);
5516 e1000_intr(adapter->pdev->irq, netdev);
5517 enable_irq(adapter->pdev->irq);
5518 break;
5519 }
5481} 5520}
5482#endif 5521#endif
5483 5522
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 7c826319ee5..9e19fbc2f17 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -302,7 +302,7 @@ struct eepro_local {
302#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */ 302#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */
303#define ee_id_eepro10p1 0x31 303#define ee_id_eepro10p1 0x31
304 304
305#define TX_TIMEOUT 40 305#define TX_TIMEOUT ((4*HZ)/10)
306 306
307/* Index to functions, as function prototypes. */ 307/* Index to functions, as function prototypes. */
308 308
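
The old eepro TX_TIMEOUT literal 40 is a raw jiffy count that matches the presumably intended 0.4 s only when HZ=100; (4*HZ)/10 expresses the same 0.4 s for any HZ, e.g. 100 jiffies at HZ=250 or 400 jiffies at HZ=1000.
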
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b95f087cd5a..69f61523fcc 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1506,12 +1506,10 @@ static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
1506{ 1506{
1507 int arr_size = sizeof(void *) * max_q_entries; 1507 int arr_size = sizeof(void *) * max_q_entries;
1508 1508
1509 q_skba->arr = vmalloc(arr_size); 1509 q_skba->arr = vzalloc(arr_size);
1510 if (!q_skba->arr) 1510 if (!q_skba->arr)
1511 return -ENOMEM; 1511 return -ENOMEM;
1512 1512
1513 memset(q_skba->arr, 0, arr_size);
1514
1515 q_skba->len = max_q_entries; 1513 q_skba->len = max_q_entries;
1516 q_skba->index = 0; 1514 q_skba->index = 0;
1517 q_skba->os_skbs = 0; 1515 q_skba->os_skbs = 0;
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c91d364c552..70672541364 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "1.4.1.6" 35#define DRV_VERSION "1.4.1.7"
36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a466ef91dd4..9f293fa2476 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -2042,7 +2042,7 @@ static int enic_dev_hang_reset(struct enic *enic)
2042 2042
2043static int enic_set_rsskey(struct enic *enic) 2043static int enic_set_rsskey(struct enic *enic)
2044{ 2044{
2045 u64 rss_key_buf_pa; 2045 dma_addr_t rss_key_buf_pa;
2046 union vnic_rss_key *rss_key_buf_va = NULL; 2046 union vnic_rss_key *rss_key_buf_va = NULL;
2047 union vnic_rss_key rss_key = { 2047 union vnic_rss_key rss_key = {
2048 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}, 2048 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
@@ -2073,7 +2073,7 @@ static int enic_set_rsskey(struct enic *enic)
2073 2073
2074static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) 2074static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
2075{ 2075{
2076 u64 rss_cpu_buf_pa; 2076 dma_addr_t rss_cpu_buf_pa;
2077 union vnic_rss_cpu *rss_cpu_buf_va = NULL; 2077 union vnic_rss_cpu *rss_cpu_buf_va = NULL;
2078 unsigned int i; 2078 unsigned int i;
2079 int err; 2079 int err;
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index c5a2fe099a8..b79d7e1555d 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,7 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/of.h>
22#include <net/ethoc.h> 23#include <net/ethoc.h>
23 24
24static int buffer_size = 0x8000; /* 32 KBytes */ 25static int buffer_size = 0x8000; /* 32 KBytes */
@@ -184,7 +185,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
184 * @netdev: pointer to network device structure 185 * @netdev: pointer to network device structure
185 * @napi: NAPI structure 186 * @napi: NAPI structure
186 * @msg_enable: device state flags 187 * @msg_enable: device state flags
187 * @rx_lock: receive lock
188 * @lock: device lock 188 * @lock: device lock
189 * @phy: attached PHY 189 * @phy: attached PHY
190 * @mdio: MDIO bus for PHY access 190 * @mdio: MDIO bus for PHY access
@@ -209,7 +209,6 @@ struct ethoc {
209 struct napi_struct napi; 209 struct napi_struct napi;
210 u32 msg_enable; 210 u32 msg_enable;
211 211
212 spinlock_t rx_lock;
213 spinlock_t lock; 212 spinlock_t lock;
214 213
215 struct phy_device *phy; 214 struct phy_device *phy;
@@ -413,10 +412,21 @@ static int ethoc_rx(struct net_device *dev, int limit)
413 unsigned int entry; 412 unsigned int entry;
414 struct ethoc_bd bd; 413 struct ethoc_bd bd;
415 414
416 entry = priv->num_tx + (priv->cur_rx % priv->num_rx); 415 entry = priv->num_tx + priv->cur_rx;
417 ethoc_read_bd(priv, entry, &bd); 416 ethoc_read_bd(priv, entry, &bd);
418 if (bd.stat & RX_BD_EMPTY) 417 if (bd.stat & RX_BD_EMPTY) {
419 break; 418 ethoc_ack_irq(priv, INT_MASK_RX);
419 /* If packet (interrupt) came in between checking
420 * BD_EMPTY and clearing the interrupt source, then we
421 * risk missing the packet as the RX interrupt won't
422 * trigger right away when we reenable it; hence, check
423 * BD_EMPTY here again to make sure there isn't such a
424 * packet waiting for us...
425 */
426 ethoc_read_bd(priv, entry, &bd);
427 if (bd.stat & RX_BD_EMPTY)
428 break;
429 }
420 430
421 if (ethoc_update_rx_stats(priv, &bd) == 0) { 431 if (ethoc_update_rx_stats(priv, &bd) == 0) {
422 int size = bd.stat >> 16; 432 int size = bd.stat >> 16;
@@ -446,13 +456,14 @@ static int ethoc_rx(struct net_device *dev, int limit)
446 bd.stat &= ~RX_BD_STATS; 456 bd.stat &= ~RX_BD_STATS;
447 bd.stat |= RX_BD_EMPTY; 457 bd.stat |= RX_BD_EMPTY;
448 ethoc_write_bd(priv, entry, &bd); 458 ethoc_write_bd(priv, entry, &bd);
449 priv->cur_rx++; 459 if (++priv->cur_rx == priv->num_rx)
460 priv->cur_rx = 0;
450 } 461 }
451 462
452 return count; 463 return count;
453} 464}
454 465
455static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) 466static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
456{ 467{
457 struct net_device *netdev = dev->netdev; 468 struct net_device *netdev = dev->netdev;
458 469
@@ -482,32 +493,44 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
482 netdev->stats.collisions += (bd->stat >> 4) & 0xf; 493 netdev->stats.collisions += (bd->stat >> 4) & 0xf;
483 netdev->stats.tx_bytes += bd->stat >> 16; 494 netdev->stats.tx_bytes += bd->stat >> 16;
484 netdev->stats.tx_packets++; 495 netdev->stats.tx_packets++;
485 return 0;
486} 496}
487 497
488static void ethoc_tx(struct net_device *dev) 498static int ethoc_tx(struct net_device *dev, int limit)
489{ 499{
490 struct ethoc *priv = netdev_priv(dev); 500 struct ethoc *priv = netdev_priv(dev);
501 int count;
502 struct ethoc_bd bd;
491 503
492 spin_lock(&priv->lock); 504 for (count = 0; count < limit; ++count) {
505 unsigned int entry;
493 506
494 while (priv->dty_tx != priv->cur_tx) { 507 entry = priv->dty_tx & (priv->num_tx-1);
495 unsigned int entry = priv->dty_tx % priv->num_tx;
496 struct ethoc_bd bd;
497 508
498 ethoc_read_bd(priv, entry, &bd); 509 ethoc_read_bd(priv, entry, &bd);
499 if (bd.stat & TX_BD_READY)
500 break;
501 510
502 entry = (++priv->dty_tx) % priv->num_tx; 511 if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
503 (void)ethoc_update_tx_stats(priv, &bd); 512 ethoc_ack_irq(priv, INT_MASK_TX);
513 /* If interrupt came in between reading in the BD
514 * and clearing the interrupt source, then we risk
515 * missing the event as the TX interrupt won't trigger
516 * right away when we reenable it; hence, check
517 * the BD status here again to make sure there isn't such an
518 * event pending...
519 */
520 ethoc_read_bd(priv, entry, &bd);
521 if (bd.stat & TX_BD_READY ||
522 (priv->dty_tx == priv->cur_tx))
523 break;
524 }
525
526 ethoc_update_tx_stats(priv, &bd);
527 priv->dty_tx++;
504 } 528 }
505 529
506 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) 530 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
507 netif_wake_queue(dev); 531 netif_wake_queue(dev);
508 532
509 ethoc_ack_irq(priv, INT_MASK_TX); 533 return count;
510 spin_unlock(&priv->lock);
511} 534}
512 535
513static irqreturn_t ethoc_interrupt(int irq, void *dev_id) 536static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
@@ -515,32 +538,38 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
515 struct net_device *dev = dev_id; 538 struct net_device *dev = dev_id;
516 struct ethoc *priv = netdev_priv(dev); 539 struct ethoc *priv = netdev_priv(dev);
517 u32 pending; 540 u32 pending;
518 541 u32 mask;
519 ethoc_disable_irq(priv, INT_MASK_ALL); 542
543 /* Figure out what triggered the interrupt...
544 * The tricky bit here is that the interrupt source bits get
545 * set in INT_SOURCE for an event regardless of whether that
546 * event is masked or not. Thus, in order to figure out what
547 * triggered the interrupt, we need to remove the sources
548 * for all events that are currently masked. This behaviour
549 * is not particularly well documented but reasonable...
550 */
551 mask = ethoc_read(priv, INT_MASK);
520 pending = ethoc_read(priv, INT_SOURCE); 552 pending = ethoc_read(priv, INT_SOURCE);
553 pending &= mask;
554
521 if (unlikely(pending == 0)) { 555 if (unlikely(pending == 0)) {
522 ethoc_enable_irq(priv, INT_MASK_ALL);
523 return IRQ_NONE; 556 return IRQ_NONE;
524 } 557 }
525 558
526 ethoc_ack_irq(priv, pending); 559 ethoc_ack_irq(priv, pending);
527 560
561 /* We always handle the dropped packet interrupt */
528 if (pending & INT_MASK_BUSY) { 562 if (pending & INT_MASK_BUSY) {
529 dev_err(&dev->dev, "packet dropped\n"); 563 dev_err(&dev->dev, "packet dropped\n");
530 dev->stats.rx_dropped++; 564 dev->stats.rx_dropped++;
531 } 565 }
532 566
533 if (pending & INT_MASK_RX) { 567 /* Handle receive/transmit event by switching to polling */
534 if (napi_schedule_prep(&priv->napi)) 568 if (pending & (INT_MASK_TX | INT_MASK_RX)) {
535 __napi_schedule(&priv->napi); 569 ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
536 } else { 570 napi_schedule(&priv->napi);
537 ethoc_enable_irq(priv, INT_MASK_RX);
538 } 571 }
539 572
540 if (pending & INT_MASK_TX)
541 ethoc_tx(dev);
542
543 ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
544 return IRQ_HANDLED; 573 return IRQ_HANDLED;
545} 574}
546 575
@@ -566,26 +595,29 @@ static int ethoc_get_mac_address(struct net_device *dev, void *addr)
566static int ethoc_poll(struct napi_struct *napi, int budget) 595static int ethoc_poll(struct napi_struct *napi, int budget)
567{ 596{
568 struct ethoc *priv = container_of(napi, struct ethoc, napi); 597 struct ethoc *priv = container_of(napi, struct ethoc, napi);
569 int work_done = 0; 598 int rx_work_done = 0;
599 int tx_work_done = 0;
600
601 rx_work_done = ethoc_rx(priv->netdev, budget);
602 tx_work_done = ethoc_tx(priv->netdev, budget);
570 603
571 work_done = ethoc_rx(priv->netdev, budget); 604 if (rx_work_done < budget && tx_work_done < budget) {
572 if (work_done < budget) {
573 ethoc_enable_irq(priv, INT_MASK_RX);
574 napi_complete(napi); 605 napi_complete(napi);
606 ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
575 } 607 }
576 608
577 return work_done; 609 return rx_work_done;
578} 610}
579 611
580static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) 612static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
581{ 613{
582 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
583 struct ethoc *priv = bus->priv; 614 struct ethoc *priv = bus->priv;
615 int i;
584 616
585 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 617 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
586 ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); 618 ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
587 619
588 while (time_before(jiffies, timeout)) { 620 for (i=0; i < 5; i++) {
589 u32 status = ethoc_read(priv, MIISTATUS); 621 u32 status = ethoc_read(priv, MIISTATUS);
590 if (!(status & MIISTATUS_BUSY)) { 622 if (!(status & MIISTATUS_BUSY)) {
591 u32 data = ethoc_read(priv, MIIRX_DATA); 623 u32 data = ethoc_read(priv, MIIRX_DATA);
@@ -593,8 +625,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
593 ethoc_write(priv, MIICOMMAND, 0); 625 ethoc_write(priv, MIICOMMAND, 0);
594 return data; 626 return data;
595 } 627 }
596 628 usleep_range(100, 200);
597 schedule();
598 } 629 }
599 630
600 return -EBUSY; 631 return -EBUSY;
@@ -602,22 +633,21 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
602 633
603static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) 634static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
604{ 635{
605 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
606 struct ethoc *priv = bus->priv; 636 struct ethoc *priv = bus->priv;
637 int i;
607 638
608 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 639 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
609 ethoc_write(priv, MIITX_DATA, val); 640 ethoc_write(priv, MIITX_DATA, val);
610 ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); 641 ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
611 642
612 while (time_before(jiffies, timeout)) { 643 for (i = 0; i < 5; i++) {
613 u32 stat = ethoc_read(priv, MIISTATUS); 644 u32 stat = ethoc_read(priv, MIISTATUS);
614 if (!(stat & MIISTATUS_BUSY)) { 645 if (!(stat & MIISTATUS_BUSY)) {
615 /* reset MII command register */ 646 /* reset MII command register */
616 ethoc_write(priv, MIICOMMAND, 0); 647 ethoc_write(priv, MIICOMMAND, 0);
617 return 0; 648 return 0;
618 } 649 }
619 650 usleep_range(100, 200);
620 schedule();
621 } 651 }
622 652
623 return -EBUSY; 653 return -EBUSY;
@@ -971,9 +1001,17 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
971 /* calculate the number of TX/RX buffers, maximum 128 supported */ 1001 /* calculate the number of TX/RX buffers, maximum 128 supported */
972 num_bd = min_t(unsigned int, 1002 num_bd = min_t(unsigned int,
973 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); 1003 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
974 priv->num_tx = max(2, num_bd / 4); 1004 if (num_bd < 4) {
1005 ret = -ENODEV;
1006 goto error;
1007 }
1008 /* num_tx must be a power of two */
1009 priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
975 priv->num_rx = num_bd - priv->num_tx; 1010 priv->num_rx = num_bd - priv->num_tx;
976 1011
1012 dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
1013 priv->num_tx, priv->num_rx);
1014
977 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL); 1015 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
978 if (!priv->vma) { 1016 if (!priv->vma) {
979 ret = -ENOMEM; 1017 ret = -ENOMEM;
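
A worked example of the new descriptor split above (illustrative, not part of the patch): num_tx becomes the largest power of two not exceeding half of num_bd, so num_bd = 10 gives num_tx = rounddown_pow_of_two(5) = 4 and num_rx = 6, while the 128-descriptor maximum gives an even 64/64 split; anything below four descriptors is now rejected with -ENODEV.
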
@@ -982,10 +1020,23 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
982 1020
983 /* Allow the platform setup code to pass in a MAC address. */ 1021 /* Allow the platform setup code to pass in a MAC address. */
984 if (pdev->dev.platform_data) { 1022 if (pdev->dev.platform_data) {
985 struct ethoc_platform_data *pdata = 1023 struct ethoc_platform_data *pdata = pdev->dev.platform_data;
986 (struct ethoc_platform_data *)pdev->dev.platform_data;
987 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); 1024 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
988 priv->phy_id = pdata->phy_id; 1025 priv->phy_id = pdata->phy_id;
1026 } else {
1027 priv->phy_id = -1;
1028
1029#ifdef CONFIG_OF
1030 {
1031 const uint8_t *mac;
1032
1033 mac = of_get_property(pdev->dev.of_node,
1034 "local-mac-address",
1035 NULL);
1036 if (mac)
1037 memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
1038 }
1039#endif
989 } 1040 }
990 1041
991 /* Check that the given MAC address is valid. If it isn't, read the 1042 /* Check that the given MAC address is valid. If it isn't, read the
@@ -1046,7 +1097,6 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
1046 /* setup NAPI */ 1097 /* setup NAPI */
1047 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); 1098 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
1048 1099
1049 spin_lock_init(&priv->rx_lock);
1050 spin_lock_init(&priv->lock); 1100 spin_lock_init(&priv->lock);
1051 1101
1052 ret = register_netdev(netdev); 1102 ret = register_netdev(netdev);
@@ -1113,6 +1163,16 @@ static int ethoc_resume(struct platform_device *pdev)
1113# define ethoc_resume NULL 1163# define ethoc_resume NULL
1114#endif 1164#endif
1115 1165
1166#ifdef CONFIG_OF
1167static struct of_device_id ethoc_match[] = {
1168 {
1169 .compatible = "opencores,ethoc",
1170 },
1171 {},
1172};
1173MODULE_DEVICE_TABLE(of, ethoc_match);
1174#endif
1175
1116static struct platform_driver ethoc_driver = { 1176static struct platform_driver ethoc_driver = {
1117 .probe = ethoc_probe, 1177 .probe = ethoc_probe,
1118 .remove = __devexit_p(ethoc_remove), 1178 .remove = __devexit_p(ethoc_remove),
@@ -1120,6 +1180,10 @@ static struct platform_driver ethoc_driver = {
1120 .resume = ethoc_resume, 1180 .resume = ethoc_resume,
1121 .driver = { 1181 .driver = {
1122 .name = "ethoc", 1182 .name = "ethoc",
1183 .owner = THIS_MODULE,
1184#ifdef CONFIG_OF
1185 .of_match_table = ethoc_match,
1186#endif
1123 }, 1187 },
1124}; 1188};
1125 1189
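
A minimal sketch of the interrupt/poll split that the ethoc changes above adopt (illustrative only; struct foo_priv and the foo_* helpers are hypothetical stand-ins for the driver's own accessors, not its symbols): the hard-irq handler masks further RX/TX sources and hands off to NAPI, and the poll routine re-enables them only once both directions finish under budget.

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

#define FOO_IRQ_RX	BIT(2)	/* hypothetical source bits */
#define FOO_IRQ_TX	BIT(3)

struct foo_priv {
	struct napi_struct napi;
	struct net_device *netdev;
};

/* hypothetical register accessors standing in for the driver's own */
u32 foo_irq_source(struct foo_priv *priv);
u32 foo_irq_mask(struct foo_priv *priv);
void foo_ack_irq(struct foo_priv *priv, u32 mask);
void foo_disable_irq(struct foo_priv *priv, u32 mask);
void foo_enable_irq(struct foo_priv *priv, u32 mask);
int foo_rx(struct net_device *dev, int budget);
int foo_tx(struct net_device *dev, int budget);

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *priv = netdev_priv(dev);
	u32 pending = foo_irq_source(priv) & foo_irq_mask(priv);

	if (!pending)
		return IRQ_NONE;	/* not ours (shared line) */

	foo_ack_irq(priv, pending);
	if (pending & (FOO_IRQ_TX | FOO_IRQ_RX)) {
		/* mask rx/tx sources until the poll loop has drained them */
		foo_disable_irq(priv, FOO_IRQ_TX | FOO_IRQ_RX);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int rx_done = foo_rx(priv->netdev, budget);
	int tx_done = foo_tx(priv->netdev, budget);

	/* leave polling mode only when both directions came in under budget */
	if (rx_done < budget && tx_done < budget) {
		napi_complete(napi);
		foo_enable_irq(priv, FOO_IRQ_TX | FOO_IRQ_RX);
	}
	return rx_done;
}
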
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index e9f5d030bc2..50c1213f61f 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -366,9 +366,8 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
366{ 366{
367 struct net_device *dev = dev_id; 367 struct net_device *dev = dev_id;
368 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 368 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
369 unsigned long flags;
370 369
371 spin_lock_irqsave(&priv->lock, flags); 370 spin_lock(&priv->lock);
372 while (bcom_buffer_done(priv->tx_dmatsk)) { 371 while (bcom_buffer_done(priv->tx_dmatsk)) {
373 struct sk_buff *skb; 372 struct sk_buff *skb;
374 struct bcom_fec_bd *bd; 373 struct bcom_fec_bd *bd;
@@ -379,7 +378,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
379 378
380 dev_kfree_skb_irq(skb); 379 dev_kfree_skb_irq(skb);
381 } 380 }
382 spin_unlock_irqrestore(&priv->lock, flags); 381 spin_unlock(&priv->lock);
383 382
384 netif_wake_queue(dev); 383 netif_wake_queue(dev);
385 384
@@ -395,9 +394,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
395 struct bcom_fec_bd *bd; 394 struct bcom_fec_bd *bd;
396 u32 status, physaddr; 395 u32 status, physaddr;
397 int length; 396 int length;
398 unsigned long flags;
399 397
400 spin_lock_irqsave(&priv->lock, flags); 398 spin_lock(&priv->lock);
401 399
402 while (bcom_buffer_done(priv->rx_dmatsk)) { 400 while (bcom_buffer_done(priv->rx_dmatsk)) {
403 401
@@ -429,7 +427,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
429 427
430 /* Process the received skb - Drop the spin lock while 428 /* Process the received skb - Drop the spin lock while
431 * calling into the network stack */ 429 * calling into the network stack */
432 spin_unlock_irqrestore(&priv->lock, flags); 430 spin_unlock(&priv->lock);
433 431
434 dma_unmap_single(dev->dev.parent, physaddr, rskb->len, 432 dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
435 DMA_FROM_DEVICE); 433 DMA_FROM_DEVICE);
@@ -438,10 +436,10 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
438 rskb->protocol = eth_type_trans(rskb, dev); 436 rskb->protocol = eth_type_trans(rskb, dev);
439 netif_rx(rskb); 437 netif_rx(rskb);
440 438
441 spin_lock_irqsave(&priv->lock, flags); 439 spin_lock(&priv->lock);
442 } 440 }
443 441
444 spin_unlock_irqrestore(&priv->lock, flags); 442 spin_unlock(&priv->lock);
445 443
446 return IRQ_HANDLED; 444 return IRQ_HANDLED;
447} 445}
@@ -452,7 +450,6 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
452 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 450 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
453 struct mpc52xx_fec __iomem *fec = priv->fec; 451 struct mpc52xx_fec __iomem *fec = priv->fec;
454 u32 ievent; 452 u32 ievent;
455 unsigned long flags;
456 453
457 ievent = in_be32(&fec->ievent); 454 ievent = in_be32(&fec->ievent);
458 455
@@ -470,9 +467,9 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
470 if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR)) 467 if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
471 dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n"); 468 dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
472 469
473 spin_lock_irqsave(&priv->lock, flags); 470 spin_lock(&priv->lock);
474 mpc52xx_fec_reset(dev); 471 mpc52xx_fec_reset(dev);
475 spin_unlock_irqrestore(&priv->lock, flags); 472 spin_unlock(&priv->lock);
476 473
477 return IRQ_HANDLED; 474 return IRQ_HANDLED;
478 } 475 }
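
The fec_mpc52xx conversion above rests on the fact that these handlers run in hard-irq context, where local interrupts are already disabled, so the _irqsave/_irqrestore variants add nothing there. A minimal sketch of the idiom (the example_* names are hypothetical, not from the driver); process-context code taking the same lock still needs spin_lock_irqsave().

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_priv {
	spinlock_t lock;
	unsigned long events;	/* state shared with process context */
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	spin_lock(&priv->lock);		/* interrupts are already off here */
	priv->events++;
	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}
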
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0fa1776563a..cd2d72d825d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -39,6 +39,9 @@
39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
40 * superfluous timer interrupts from the nic. 40 * superfluous timer interrupts from the nic.
41 */ 41 */
42
43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
42#define FORCEDETH_VERSION "0.64" 45#define FORCEDETH_VERSION "0.64"
43#define DRV_NAME "forcedeth" 46#define DRV_NAME "forcedeth"
44 47
@@ -60,18 +63,12 @@
60#include <linux/if_vlan.h> 63#include <linux/if_vlan.h>
61#include <linux/dma-mapping.h> 64#include <linux/dma-mapping.h>
62#include <linux/slab.h> 65#include <linux/slab.h>
66#include <linux/uaccess.h>
67#include <linux/io.h>
63 68
64#include <asm/irq.h> 69#include <asm/irq.h>
65#include <asm/io.h>
66#include <asm/uaccess.h>
67#include <asm/system.h> 70#include <asm/system.h>
68 71
69#if 0
70#define dprintk printk
71#else
72#define dprintk(x...) do { } while (0)
73#endif
74
75#define TX_WORK_PER_LOOP 64 72#define TX_WORK_PER_LOOP 64
76#define RX_WORK_PER_LOOP 64 73#define RX_WORK_PER_LOOP 64
77 74
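
For context on the pr_fmt() definition added above (a sketch, not part of the patch): defining pr_fmt() before the first header that pulls in the printk machinery makes every later pr_*() call in the file prefix its output with the module name, so individual call sites no longer spell the prefix out by hand.

/* must come before any include that drags in <linux/printk.h> */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init example_init(void)
{
	pr_info("loaded\n");	/* printed as "<modname>: loaded" */
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");
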
@@ -186,9 +183,9 @@ enum {
186 NvRegSlotTime = 0x9c, 183 NvRegSlotTime = 0x9c,
187#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000 184#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
188#define NVREG_SLOTTIME_10_100_FULL 0x00007f00 185#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
189#define NVREG_SLOTTIME_1000_FULL 0x0003ff00 186#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
190#define NVREG_SLOTTIME_HALF 0x0000ff00 187#define NVREG_SLOTTIME_HALF 0x0000ff00
191#define NVREG_SLOTTIME_DEFAULT 0x00007f00 188#define NVREG_SLOTTIME_DEFAULT 0x00007f00
192#define NVREG_SLOTTIME_MASK 0x000000ff 189#define NVREG_SLOTTIME_MASK 0x000000ff
193 190
194 NvRegTxDeferral = 0xA0, 191 NvRegTxDeferral = 0xA0,
@@ -297,7 +294,7 @@ enum {
297#define NVREG_WAKEUPFLAGS_ENABLE 0x1111 294#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
298 295
299 NvRegMgmtUnitGetVersion = 0x204, 296 NvRegMgmtUnitGetVersion = 0x204,
300#define NVREG_MGMTUNITGETVERSION 0x01 297#define NVREG_MGMTUNITGETVERSION 0x01
301 NvRegMgmtUnitVersion = 0x208, 298 NvRegMgmtUnitVersion = 0x208,
302#define NVREG_MGMTUNITVERSION 0x08 299#define NVREG_MGMTUNITVERSION 0x08
303 NvRegPowerCap = 0x268, 300 NvRegPowerCap = 0x268,
@@ -368,8 +365,8 @@ struct ring_desc_ex {
368}; 365};
369 366
370union ring_type { 367union ring_type {
371 struct ring_desc* orig; 368 struct ring_desc *orig;
372 struct ring_desc_ex* ex; 369 struct ring_desc_ex *ex;
373}; 370};
374 371
375#define FLAG_MASK_V1 0xffff0000 372#define FLAG_MASK_V1 0xffff0000
@@ -444,10 +441,10 @@ union ring_type {
444#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) 441#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
445 442
446/* Miscellaneous hardware related defines: */ 443
447#define NV_PCI_REGSZ_VER1 0x270 444#define NV_PCI_REGSZ_VER1 0x270
448#define NV_PCI_REGSZ_VER2 0x2d4 445#define NV_PCI_REGSZ_VER2 0x2d4
449#define NV_PCI_REGSZ_VER3 0x604 446#define NV_PCI_REGSZ_VER3 0x604
450#define NV_PCI_REGSZ_MAX 0x604 447#define NV_PCI_REGSZ_MAX 0x604
451 448
452/* various timeout delays: all in usec */ 449/* various timeout delays: all in usec */
453#define NV_TXRX_RESET_DELAY 4 450#define NV_TXRX_RESET_DELAY 4
@@ -717,7 +714,7 @@ static const struct register_test nv_registers_test[] = {
717 { NvRegMulticastAddrA, 0xffffffff }, 714 { NvRegMulticastAddrA, 0xffffffff },
718 { NvRegTxWatermark, 0x0ff }, 715 { NvRegTxWatermark, 0x0ff },
719 { NvRegWakeUpFlags, 0x07777 }, 716 { NvRegWakeUpFlags, 0x07777 },
720 { 0,0 } 717 { 0, 0 }
721}; 718};
722 719
723struct nv_skb_map { 720struct nv_skb_map {
@@ -911,7 +908,7 @@ static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
911 * Power down phy when interface is down (persists through reboot; 908 * Power down phy when interface is down (persists through reboot;
912 * older Linux and other OSes may not power it up again) 909 * older Linux and other OSes may not power it up again)
913 */ 910 */
914static int phy_power_down = 0; 911static int phy_power_down;
915 912
916static inline struct fe_priv *get_nvpriv(struct net_device *dev) 913static inline struct fe_priv *get_nvpriv(struct net_device *dev)
917{ 914{
@@ -948,7 +945,7 @@ static bool nv_optimized(struct fe_priv *np)
948} 945}
949 946
950static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 947static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
951 int delay, int delaymax, const char *msg) 948 int delay, int delaymax)
952{ 949{
953 u8 __iomem *base = get_hwbase(dev); 950 u8 __iomem *base = get_hwbase(dev);
954 951
@@ -956,11 +953,8 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
956 do { 953 do {
957 udelay(delay); 954 udelay(delay);
958 delaymax -= delay; 955 delaymax -= delay;
959 if (delaymax < 0) { 956 if (delaymax < 0)
960 if (msg)
961 printk("%s", msg);
962 return 1; 957 return 1;
963 }
964 } while ((readl(base + offset) & mask) != target); 958 } while ((readl(base + offset) & mask) != target);
965 return 0; 959 return 0;
966} 960}
@@ -984,12 +978,10 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
984 u8 __iomem *base = get_hwbase(dev); 978 u8 __iomem *base = get_hwbase(dev);
985 979
986 if (!nv_optimized(np)) { 980 if (!nv_optimized(np)) {
987 if (rxtx_flags & NV_SETUP_RX_RING) { 981 if (rxtx_flags & NV_SETUP_RX_RING)
988 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 982 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
989 } 983 if (rxtx_flags & NV_SETUP_TX_RING)
990 if (rxtx_flags & NV_SETUP_TX_RING) {
991 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 984 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
992 }
993 } else { 985 } else {
994 if (rxtx_flags & NV_SETUP_RX_RING) { 986 if (rxtx_flags & NV_SETUP_RX_RING) {
995 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 987 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
@@ -1015,10 +1007,8 @@ static void free_rings(struct net_device *dev)
1015 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 1007 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
1016 np->rx_ring.ex, np->ring_addr); 1008 np->rx_ring.ex, np->ring_addr);
1017 } 1009 }
1018 if (np->rx_skb) 1010 kfree(np->rx_skb);
1019 kfree(np->rx_skb); 1011 kfree(np->tx_skb);
1020 if (np->tx_skb)
1021 kfree(np->tx_skb);
1022} 1012}
1023 1013
1024static int using_multi_irqs(struct net_device *dev) 1014static int using_multi_irqs(struct net_device *dev)
@@ -1145,23 +1135,15 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1145 writel(reg, base + NvRegMIIControl); 1135 writel(reg, base + NvRegMIIControl);
1146 1136
1147 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0, 1137 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1148 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) { 1138 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
1149 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
1150 dev->name, miireg, addr);
1151 retval = -1; 1139 retval = -1;
1152 } else if (value != MII_READ) { 1140 } else if (value != MII_READ) {
1153 /* it was a write operation - fewer failures are detectable */ 1141 /* it was a write operation - fewer failures are detectable */
1154 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
1155 dev->name, value, miireg, addr);
1156 retval = 0; 1142 retval = 0;
1157 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) { 1143 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1158 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
1159 dev->name, miireg, addr);
1160 retval = -1; 1144 retval = -1;
1161 } else { 1145 } else {
1162 retval = readl(base + NvRegMIIData); 1146 retval = readl(base + NvRegMIIData);
1163 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
1164 dev->name, miireg, addr, retval);
1165 } 1147 }
1166 1148
1167 return retval; 1149 return retval;
@@ -1174,16 +1156,15 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1174 unsigned int tries = 0; 1156 unsigned int tries = 0;
1175 1157
1176 miicontrol = BMCR_RESET | bmcr_setup; 1158 miicontrol = BMCR_RESET | bmcr_setup;
1177 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { 1159 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
1178 return -1; 1160 return -1;
1179 }
1180 1161
1181 /* wait for 500ms */ 1162 /* wait for 500ms */
1182 msleep(500); 1163 msleep(500);
1183 1164
1184 /* must wait till reset is deasserted */ 1165 /* must wait till reset is deasserted */
1185 while (miicontrol & BMCR_RESET) { 1166 while (miicontrol & BMCR_RESET) {
1186 msleep(10); 1167 usleep_range(10000, 20000);
1187 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1168 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1188 /* FIXME: 100 tries seem excessive */ 1169 /* FIXME: 100 tries seem excessive */
1189 if (tries++ > 100) 1170 if (tries++ > 100)
@@ -1192,106 +1173,239 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1192 return 0; 1173 return 0;
1193} 1174}
1194 1175
1176static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
1177{
1178 static const struct {
1179 int reg;
1180 int init;
1181 } ri[] = {
1182 { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1183 { PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
1184 { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
1185 { PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
1186 { PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
1187 { PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
1188 { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1189 };
1190 int i;
1191
1192 for (i = 0; i < ARRAY_SIZE(ri); i++) {
1193 if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
1194 return PHY_ERROR;
1195 }
1196
1197 return 0;
1198}
1199
1200static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
1201{
1202 u32 reg;
1203 u8 __iomem *base = get_hwbase(dev);
1204 u32 powerstate = readl(base + NvRegPowerState2);
1205
1206 /* need to perform hw phy reset */
1207 powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1208 writel(powerstate, base + NvRegPowerState2);
1209 msleep(25);
1210
1211 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1212 writel(powerstate, base + NvRegPowerState2);
1213 msleep(25);
1214
1215 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1216 reg |= PHY_REALTEK_INIT9;
1217 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
1218 return PHY_ERROR;
1219 if (mii_rw(dev, np->phyaddr,
1220 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
1221 return PHY_ERROR;
1222 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1223 if (!(reg & PHY_REALTEK_INIT11)) {
1224 reg |= PHY_REALTEK_INIT11;
1225 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
1226 return PHY_ERROR;
1227 }
1228 if (mii_rw(dev, np->phyaddr,
1229 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1230 return PHY_ERROR;
1231
1232 return 0;
1233}
1234
1235static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
1236{
1237 u32 phy_reserved;
1238
1239 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1240 phy_reserved = mii_rw(dev, np->phyaddr,
1241 PHY_REALTEK_INIT_REG6, MII_READ);
1242 phy_reserved |= PHY_REALTEK_INIT7;
1243 if (mii_rw(dev, np->phyaddr,
1244 PHY_REALTEK_INIT_REG6, phy_reserved))
1245 return PHY_ERROR;
1246 }
1247
1248 return 0;
1249}
1250
1251static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
1252{
1253 u32 phy_reserved;
1254
1255 if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1256 if (mii_rw(dev, np->phyaddr,
1257 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
1258 return PHY_ERROR;
1259 phy_reserved = mii_rw(dev, np->phyaddr,
1260 PHY_REALTEK_INIT_REG2, MII_READ);
1261 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1262 phy_reserved |= PHY_REALTEK_INIT3;
1263 if (mii_rw(dev, np->phyaddr,
1264 PHY_REALTEK_INIT_REG2, phy_reserved))
1265 return PHY_ERROR;
1266 if (mii_rw(dev, np->phyaddr,
1267 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1268 return PHY_ERROR;
1269 }
1270
1271 return 0;
1272}
1273
1274static int init_cicada(struct net_device *dev, struct fe_priv *np,
1275 u32 phyinterface)
1276{
1277 u32 phy_reserved;
1278
1279 if (phyinterface & PHY_RGMII) {
1280 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1281 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1282 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1283 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
1284 return PHY_ERROR;
1285 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1286 phy_reserved |= PHY_CICADA_INIT5;
1287 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
1288 return PHY_ERROR;
1289 }
1290 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1291 phy_reserved |= PHY_CICADA_INIT6;
1292 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
1293 return PHY_ERROR;
1294
1295 return 0;
1296}
1297
1298static int init_vitesse(struct net_device *dev, struct fe_priv *np)
1299{
1300 u32 phy_reserved;
1301
1302 if (mii_rw(dev, np->phyaddr,
1303 PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
1304 return PHY_ERROR;
1305 if (mii_rw(dev, np->phyaddr,
1306 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
1307 return PHY_ERROR;
1308 phy_reserved = mii_rw(dev, np->phyaddr,
1309 PHY_VITESSE_INIT_REG4, MII_READ);
1310 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1311 return PHY_ERROR;
1312 phy_reserved = mii_rw(dev, np->phyaddr,
1313 PHY_VITESSE_INIT_REG3, MII_READ);
1314 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1315 phy_reserved |= PHY_VITESSE_INIT3;
1316 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1317 return PHY_ERROR;
1318 if (mii_rw(dev, np->phyaddr,
1319 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
1320 return PHY_ERROR;
1321 if (mii_rw(dev, np->phyaddr,
1322 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
1323 return PHY_ERROR;
1324 phy_reserved = mii_rw(dev, np->phyaddr,
1325 PHY_VITESSE_INIT_REG4, MII_READ);
1326 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1327 phy_reserved |= PHY_VITESSE_INIT3;
1328 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1329 return PHY_ERROR;
1330 phy_reserved = mii_rw(dev, np->phyaddr,
1331 PHY_VITESSE_INIT_REG3, MII_READ);
1332 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1333 return PHY_ERROR;
1334 if (mii_rw(dev, np->phyaddr,
1335 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
1336 return PHY_ERROR;
1337 if (mii_rw(dev, np->phyaddr,
1338 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
1339 return PHY_ERROR;
1340 phy_reserved = mii_rw(dev, np->phyaddr,
1341 PHY_VITESSE_INIT_REG4, MII_READ);
1342 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1343 return PHY_ERROR;
1344 phy_reserved = mii_rw(dev, np->phyaddr,
1345 PHY_VITESSE_INIT_REG3, MII_READ);
1346 phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1347 phy_reserved |= PHY_VITESSE_INIT8;
1348 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1349 return PHY_ERROR;
1350 if (mii_rw(dev, np->phyaddr,
1351 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
1352 return PHY_ERROR;
1353 if (mii_rw(dev, np->phyaddr,
1354 PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
1355 return PHY_ERROR;
1356
1357 return 0;
1358}
1359
1195static int phy_init(struct net_device *dev) 1360static int phy_init(struct net_device *dev)
1196{ 1361{
1197 struct fe_priv *np = get_nvpriv(dev); 1362 struct fe_priv *np = get_nvpriv(dev);
1198 u8 __iomem *base = get_hwbase(dev); 1363 u8 __iomem *base = get_hwbase(dev);
1199 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; 1364 u32 phyinterface;
1365 u32 mii_status, mii_control, mii_control_1000, reg;
1200 1366
1201 /* phy errata for E3016 phy */ 1367 /* phy errata for E3016 phy */
1202 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 1368 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1203 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1369 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1204 reg &= ~PHY_MARVELL_E3016_INITMASK; 1370 reg &= ~PHY_MARVELL_E3016_INITMASK;
1205 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { 1371 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1206 printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev)); 1372 netdev_info(dev, "%s: phy write to errata reg failed\n",
1373 pci_name(np->pci_dev));
1207 return PHY_ERROR; 1374 return PHY_ERROR;
1208 } 1375 }
1209 } 1376 }
1210 if (np->phy_oui == PHY_OUI_REALTEK) { 1377 if (np->phy_oui == PHY_OUI_REALTEK) {
1211 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1378 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1212 np->phy_rev == PHY_REV_REALTEK_8211B) { 1379 np->phy_rev == PHY_REV_REALTEK_8211B) {
1213 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1380 if (init_realtek_8211b(dev, np)) {
1214 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1381 netdev_info(dev, "%s: phy init failed\n",
1215 return PHY_ERROR; 1382 pci_name(np->pci_dev));
1216 }
1217 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1218 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1219 return PHY_ERROR;
1220 }
1221 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1222 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1223 return PHY_ERROR; 1383 return PHY_ERROR;
1224 } 1384 }
1225 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1385 } else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1226 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1386 np->phy_rev == PHY_REV_REALTEK_8211C) {
1387 if (init_realtek_8211c(dev, np)) {
1388 netdev_info(dev, "%s: phy init failed\n",
1389 pci_name(np->pci_dev));
1227 return PHY_ERROR; 1390 return PHY_ERROR;
1228 } 1391 }
1229 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1392 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1230 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1393 if (init_realtek_8201(dev, np)) {
1394 netdev_info(dev, "%s: phy init failed\n",
1395 pci_name(np->pci_dev));
1231 return PHY_ERROR; 1396 return PHY_ERROR;
1232 } 1397 }
1233 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
1234 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1235 return PHY_ERROR;
1236 }
1237 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1238 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1239 return PHY_ERROR;
1240 }
1241 }
1242 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1243 np->phy_rev == PHY_REV_REALTEK_8211C) {
1244 u32 powerstate = readl(base + NvRegPowerState2);
1245
1246 /* need to perform hw phy reset */
1247 powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1248 writel(powerstate, base + NvRegPowerState2);
1249 msleep(25);
1250
1251 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1252 writel(powerstate, base + NvRegPowerState2);
1253 msleep(25);
1254
1255 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1256 reg |= PHY_REALTEK_INIT9;
1257 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
1258 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1259 return PHY_ERROR;
1260 }
1261 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
1262 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1263 return PHY_ERROR;
1264 }
1265 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1266 if (!(reg & PHY_REALTEK_INIT11)) {
1267 reg |= PHY_REALTEK_INIT11;
1268 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
1269 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1270 return PHY_ERROR;
1271 }
1272 }
1273 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1274 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1275 return PHY_ERROR;
1276 }
1277 }
1278 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1279 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1280 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1281 phy_reserved |= PHY_REALTEK_INIT7;
1282 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
1283 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1284 return PHY_ERROR;
1285 }
1286 }
1287 } 1398 }
1288 } 1399 }
1289 1400
1290 /* set advertise register */ 1401 /* set advertise register */
1291 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1402 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1292 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); 1403 reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
1404 ADVERTISE_100HALF | ADVERTISE_100FULL |
1405 ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
1293 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { 1406 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1294 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); 1407 netdev_info(dev, "%s: phy write to advertise failed\n",
1408 pci_name(np->pci_dev));
1295 return PHY_ERROR; 1409 return PHY_ERROR;
1296 } 1410 }
1297 1411
@@ -1302,7 +1416,8 @@ static int phy_init(struct net_device *dev)
1302 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1416 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1303 if (mii_status & PHY_GIGABIT) { 1417 if (mii_status & PHY_GIGABIT) {
1304 np->gigabit = PHY_GIGABIT; 1418 np->gigabit = PHY_GIGABIT;
1305 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 1419 mii_control_1000 = mii_rw(dev, np->phyaddr,
1420 MII_CTRL1000, MII_READ);
1306 mii_control_1000 &= ~ADVERTISE_1000HALF; 1421 mii_control_1000 &= ~ADVERTISE_1000HALF;
1307 if (phyinterface & PHY_RGMII) 1422 if (phyinterface & PHY_RGMII)
1308 mii_control_1000 |= ADVERTISE_1000FULL; 1423 mii_control_1000 |= ADVERTISE_1000FULL;
@@ -1310,11 +1425,11 @@ static int phy_init(struct net_device *dev)
1310 mii_control_1000 &= ~ADVERTISE_1000FULL; 1425 mii_control_1000 &= ~ADVERTISE_1000FULL;
1311 1426
1312 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { 1427 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1313 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1428 netdev_info(dev, "%s: phy init failed\n",
1429 pci_name(np->pci_dev));
1314 return PHY_ERROR; 1430 return PHY_ERROR;
1315 } 1431 }
1316 } 1432 } else
1317 else
1318 np->gigabit = 0; 1433 np->gigabit = 0;
1319 1434
1320 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1435 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1326,7 +1441,8 @@ static int phy_init(struct net_device *dev)
1326 /* start autoneg since we already performed hw reset above */ 1441 /* start autoneg since we already performed hw reset above */
1327 mii_control |= BMCR_ANRESTART; 1442 mii_control |= BMCR_ANRESTART;
1328 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1443 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1329 printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev)); 1444 netdev_info(dev, "%s: phy init failed\n",
1445 pci_name(np->pci_dev));
1330 return PHY_ERROR; 1446 return PHY_ERROR;
1331 } 1447 }
1332 } else { 1448 } else {
@@ -1334,165 +1450,42 @@ static int phy_init(struct net_device *dev)
1334 * (certain phys need bmcr to be setup with reset) 1450 * (certain phys need bmcr to be setup with reset)
1335 */ 1451 */
1336 if (phy_reset(dev, mii_control)) { 1452 if (phy_reset(dev, mii_control)) {
1337 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1453 netdev_info(dev, "%s: phy reset failed\n",
1454 pci_name(np->pci_dev));
1338 return PHY_ERROR; 1455 return PHY_ERROR;
1339 } 1456 }
1340 } 1457 }
1341 1458
1342 /* phy vendor specific configuration */ 1459 /* phy vendor specific configuration */
1343 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { 1460 if (np->phy_oui == PHY_OUI_CICADA) {
1344 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); 1461 if (init_cicada(dev, np, phyinterface)) {
1345 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); 1462 netdev_info(dev, "%s: phy init failed\n",
1346 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); 1463 pci_name(np->pci_dev));
1347 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
1348 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1349 return PHY_ERROR;
1350 }
1351 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1352 phy_reserved |= PHY_CICADA_INIT5;
1353 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
1354 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1355 return PHY_ERROR;
1356 }
1357 }
1358 if (np->phy_oui == PHY_OUI_CICADA) {
1359 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1360 phy_reserved |= PHY_CICADA_INIT6;
1361 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
1362 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1363 return PHY_ERROR;
1364 }
1365 }
1366 if (np->phy_oui == PHY_OUI_VITESSE) {
1367 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
1368 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1369 return PHY_ERROR;
1370 }
1371 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
1372 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1373 return PHY_ERROR;
1374 }
1375 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1376 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1377 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1378 return PHY_ERROR;
1379 }
1380 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1381 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1382 phy_reserved |= PHY_VITESSE_INIT3;
1383 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1384 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1385 return PHY_ERROR;
1386 }
1387 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
1388 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1389 return PHY_ERROR;
1390 }
1391 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
1392 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1393 return PHY_ERROR;
1394 }
1395 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1396 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1397 phy_reserved |= PHY_VITESSE_INIT3;
1398 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1399 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1400 return PHY_ERROR;
1401 }
1402 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1403 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1404 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1405 return PHY_ERROR; 1464 return PHY_ERROR;
1406 } 1465 }
1407 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { 1466 } else if (np->phy_oui == PHY_OUI_VITESSE) {
1408 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1467 if (init_vitesse(dev, np)) {
1468 netdev_info(dev, "%s: phy init failed\n",
1469 pci_name(np->pci_dev));
1409 return PHY_ERROR; 1470 return PHY_ERROR;
1410 } 1471 }
1411 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { 1472 } else if (np->phy_oui == PHY_OUI_REALTEK) {
1412 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1413 return PHY_ERROR;
1414 }
1415 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1416 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1417 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1418 return PHY_ERROR;
1419 }
1420 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1421 phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1422 phy_reserved |= PHY_VITESSE_INIT8;
1423 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1424 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1425 return PHY_ERROR;
1426 }
1427 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
1428 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1429 return PHY_ERROR;
1430 }
1431 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
1432 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1433 return PHY_ERROR;
1434 }
1435 }
1436 if (np->phy_oui == PHY_OUI_REALTEK) {
1437 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1473 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1438 np->phy_rev == PHY_REV_REALTEK_8211B) { 1474 np->phy_rev == PHY_REV_REALTEK_8211B) {
1439 /* reset could have cleared these out, set them back */ 1475 /* reset could have cleared these out, set them back */
1440 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1476 if (init_realtek_8211b(dev, np)) {
1441 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1477 netdev_info(dev, "%s: phy init failed\n",
1442 return PHY_ERROR; 1478 pci_name(np->pci_dev));
1443 }
1444 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1445 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1446 return PHY_ERROR;
1447 }
1448 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1449 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1450 return PHY_ERROR;
1451 }
1452 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
1453 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1454 return PHY_ERROR;
1455 }
1456 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
1457 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1458 return PHY_ERROR;
1459 }
1460 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
1461 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1462 return PHY_ERROR; 1479 return PHY_ERROR;
1463 } 1480 }
1464 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1481 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1465 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1482 if (init_realtek_8201(dev, np) ||
1483 init_realtek_8201_cross(dev, np)) {
1484 netdev_info(dev, "%s: phy init failed\n",
1485 pci_name(np->pci_dev));
1466 return PHY_ERROR; 1486 return PHY_ERROR;
1467 } 1487 }
1468 } 1488 }
1469 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1470 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1471 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1472 phy_reserved |= PHY_REALTEK_INIT7;
1473 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
1474 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1475 return PHY_ERROR;
1476 }
1477 }
1478 if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1479 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1480 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1481 return PHY_ERROR;
1482 }
1483 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
1484 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1485 phy_reserved |= PHY_REALTEK_INIT3;
1486 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
1487 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1488 return PHY_ERROR;
1489 }
1490 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1491 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1492 return PHY_ERROR;
1493 }
1494 }
1495 }
1496 } 1489 }
1497 1490
1498 /* some phys clear out pause advertisement on reset, set it back */ 1491
@@ -1501,12 +1494,10 @@ static int phy_init(struct net_device *dev)
1501 /* restart auto negotiation, power down phy */ 1494 /* restart auto negotiation, power down phy */
1502 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1495 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1503 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1496 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1504 if (phy_power_down) { 1497 if (phy_power_down)
1505 mii_control |= BMCR_PDOWN; 1498 mii_control |= BMCR_PDOWN;
1506 } 1499 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1507 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1508 return PHY_ERROR; 1500 return PHY_ERROR;
1509 }
1510 1501
1511 return 0; 1502 return 0;
1512} 1503}
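
The phy_init() rework above pulls each vendor-specific sequence into its own helper; init_realtek_8211b() in particular replaces the long if-chain with a table of register/value pairs. A minimal sketch of that table-driven shape (example_phy_write() and the register values are hypothetical, not the driver's):

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* hypothetical MII write helper; returns non-zero on failure */
int example_phy_write(struct net_device *dev, int phyaddr, int reg, int val);

static int example_phy_init(struct net_device *dev, int phyaddr)
{
	static const struct {
		int reg;
		int val;
	} steps[] = {
		{ 0x1f, 0x0005 },	/* made-up register/value pairs */
		{ 0x01, 0x0340 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(steps); i++)
		if (example_phy_write(dev, phyaddr, steps[i].reg, steps[i].val))
			return -EIO;

	return 0;
}
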
@@ -1517,7 +1508,6 @@ static void nv_start_rx(struct net_device *dev)
1517 u8 __iomem *base = get_hwbase(dev); 1508 u8 __iomem *base = get_hwbase(dev);
1518 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1509 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1519 1510
1520 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
1521 /* Already running? Stop it. */ 1511 /* Already running? Stop it. */
1522 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { 1512 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1523 rx_ctrl &= ~NVREG_RCVCTL_START; 1513 rx_ctrl &= ~NVREG_RCVCTL_START;
@@ -1526,12 +1516,10 @@ static void nv_start_rx(struct net_device *dev)
1526 } 1516 }
1527 writel(np->linkspeed, base + NvRegLinkSpeed); 1517 writel(np->linkspeed, base + NvRegLinkSpeed);
1528 pci_push(base); 1518 pci_push(base);
1529 rx_ctrl |= NVREG_RCVCTL_START; 1519 rx_ctrl |= NVREG_RCVCTL_START;
1530 if (np->mac_in_use) 1520 if (np->mac_in_use)
1531 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; 1521 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1532 writel(rx_ctrl, base + NvRegReceiverControl); 1522 writel(rx_ctrl, base + NvRegReceiverControl);
1533 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
1534 dev->name, np->duplex, np->linkspeed);
1535 pci_push(base); 1523 pci_push(base);
1536} 1524}
1537 1525
@@ -1541,15 +1529,15 @@ static void nv_stop_rx(struct net_device *dev)
1541 u8 __iomem *base = get_hwbase(dev); 1529 u8 __iomem *base = get_hwbase(dev);
1542 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1530 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1543 1531
1544 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
1545 if (!np->mac_in_use) 1532 if (!np->mac_in_use)
1546 rx_ctrl &= ~NVREG_RCVCTL_START; 1533 rx_ctrl &= ~NVREG_RCVCTL_START;
1547 else 1534 else
1548 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; 1535 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1549 writel(rx_ctrl, base + NvRegReceiverControl); 1536 writel(rx_ctrl, base + NvRegReceiverControl);
1550 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1537 if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1551 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1538 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
1552 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1539 netdev_info(dev, "%s: ReceiverStatus remained busy\n",
1540 __func__);
1553 1541
1554 udelay(NV_RXSTOP_DELAY2); 1542 udelay(NV_RXSTOP_DELAY2);
1555 if (!np->mac_in_use) 1543 if (!np->mac_in_use)
@@ -1562,7 +1550,6 @@ static void nv_start_tx(struct net_device *dev)
1562 u8 __iomem *base = get_hwbase(dev); 1550 u8 __iomem *base = get_hwbase(dev);
1563 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1551 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1564 1552
1565 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
1566 tx_ctrl |= NVREG_XMITCTL_START; 1553 tx_ctrl |= NVREG_XMITCTL_START;
1567 if (np->mac_in_use) 1554 if (np->mac_in_use)
1568 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; 1555 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
@@ -1576,15 +1563,15 @@ static void nv_stop_tx(struct net_device *dev)
1576 u8 __iomem *base = get_hwbase(dev); 1563 u8 __iomem *base = get_hwbase(dev);
1577 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1564 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1578 1565
1579 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
1580 if (!np->mac_in_use) 1566 if (!np->mac_in_use)
1581 tx_ctrl &= ~NVREG_XMITCTL_START; 1567 tx_ctrl &= ~NVREG_XMITCTL_START;
1582 else 1568 else
1583 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; 1569 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1584 writel(tx_ctrl, base + NvRegTransmitterControl); 1570 writel(tx_ctrl, base + NvRegTransmitterControl);
1585 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1571 if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1586 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1572 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
1587 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1573 netdev_info(dev, "%s: TransmitterStatus remained busy\n",
1574 __func__);
1588 1575
1589 udelay(NV_TXSTOP_DELAY2); 1576 udelay(NV_TXSTOP_DELAY2);
1590 if (!np->mac_in_use) 1577 if (!np->mac_in_use)
@@ -1609,7 +1596,6 @@ static void nv_txrx_reset(struct net_device *dev)
1609 struct fe_priv *np = netdev_priv(dev); 1596 struct fe_priv *np = netdev_priv(dev);
1610 u8 __iomem *base = get_hwbase(dev); 1597 u8 __iomem *base = get_hwbase(dev);
1611 1598
1612 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
1613 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1599 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1614 pci_push(base); 1600 pci_push(base);
1615 udelay(NV_TXRX_RESET_DELAY); 1601 udelay(NV_TXRX_RESET_DELAY);
@@ -1623,8 +1609,6 @@ static void nv_mac_reset(struct net_device *dev)
1623 u8 __iomem *base = get_hwbase(dev); 1609 u8 __iomem *base = get_hwbase(dev);
1624 u32 temp1, temp2, temp3; 1610 u32 temp1, temp2, temp3;
1625 1611
1626 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
1627
1628 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1612 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1629 pci_push(base); 1613 pci_push(base);
1630 1614
@@ -1745,7 +1729,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1745static int nv_alloc_rx(struct net_device *dev) 1729static int nv_alloc_rx(struct net_device *dev)
1746{ 1730{
1747 struct fe_priv *np = netdev_priv(dev); 1731 struct fe_priv *np = netdev_priv(dev);
1748 struct ring_desc* less_rx; 1732 struct ring_desc *less_rx;
1749 1733
1750 less_rx = np->get_rx.orig; 1734 less_rx = np->get_rx.orig;
1751 if (less_rx-- == np->first_rx.orig) 1735 if (less_rx-- == np->first_rx.orig)
@@ -1767,9 +1751,8 @@ static int nv_alloc_rx(struct net_device *dev)
1767 np->put_rx.orig = np->first_rx.orig; 1751 np->put_rx.orig = np->first_rx.orig;
1768 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1752 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1769 np->put_rx_ctx = np->first_rx_ctx; 1753 np->put_rx_ctx = np->first_rx_ctx;
1770 } else { 1754 } else
1771 return 1; 1755 return 1;
1772 }
1773 } 1756 }
1774 return 0; 1757 return 0;
1775} 1758}
@@ -1777,7 +1760,7 @@ static int nv_alloc_rx(struct net_device *dev)
1777static int nv_alloc_rx_optimized(struct net_device *dev) 1760static int nv_alloc_rx_optimized(struct net_device *dev)
1778{ 1761{
1779 struct fe_priv *np = netdev_priv(dev); 1762 struct fe_priv *np = netdev_priv(dev);
1780 struct ring_desc_ex* less_rx; 1763 struct ring_desc_ex *less_rx;
1781 1764
1782 less_rx = np->get_rx.ex; 1765 less_rx = np->get_rx.ex;
1783 if (less_rx-- == np->first_rx.ex) 1766 if (less_rx-- == np->first_rx.ex)
@@ -1800,9 +1783,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
1800 np->put_rx.ex = np->first_rx.ex; 1783 np->put_rx.ex = np->first_rx.ex;
1801 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1784 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1802 np->put_rx_ctx = np->first_rx_ctx; 1785 np->put_rx_ctx = np->first_rx_ctx;
1803 } else { 1786 } else
1804 return 1; 1787 return 1;
1805 }
1806 } 1788 }
1807 return 0; 1789 return 0;
1808} 1790}
@@ -2018,24 +2000,24 @@ static void nv_legacybackoff_reseed(struct net_device *dev)
2018 2000
2019/* Known Good seed sets */ 2001/* Known Good seed sets */
2020static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2002static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2021 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2003 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2022 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, 2004 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2023 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2005 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2024 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, 2006 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2025 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, 2007 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2026 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, 2008 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2027 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, 2009 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
2028 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}}; 2010 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2029 2011
2030static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2012static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2031 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2013 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2032 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2014 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2033 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, 2015 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2034 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2016 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2035 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2017 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2036 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2018 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2037 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2019 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2038 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}}; 2020 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2039 2021
2040static void nv_gear_backoff_reseed(struct net_device *dev) 2022static void nv_gear_backoff_reseed(struct net_device *dev)
2041{ 2023{
@@ -2083,13 +2065,12 @@ static void nv_gear_backoff_reseed(struct net_device *dev)
2083 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); 2065 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2084 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; 2066 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2085 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; 2067 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2086 writel(temp,base + NvRegBackOffControl); 2068 writel(temp, base + NvRegBackOffControl);
2087 2069
2088 /* Setup seeds for all gear LFSRs. */ 2070 /* Setup seeds for all gear LFSRs. */
2089 get_random_bytes(&seedset, sizeof(seedset)); 2071 get_random_bytes(&seedset, sizeof(seedset));
2090 seedset = seedset % BACKOFF_SEEDSET_ROWS; 2072 seedset = seedset % BACKOFF_SEEDSET_ROWS;
2091 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) 2073 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2092 {
2093 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); 2074 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2094 temp |= main_seedset[seedset][i-1] & 0x3ff; 2075 temp |= main_seedset[seedset][i-1] & 0x3ff;
2095 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); 2076 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
@@ -2113,10 +2094,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2113 u32 size = skb_headlen(skb); 2094 u32 size = skb_headlen(skb);
2114 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2095 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2115 u32 empty_slots; 2096 u32 empty_slots;
2116 struct ring_desc* put_tx; 2097 struct ring_desc *put_tx;
2117 struct ring_desc* start_tx; 2098 struct ring_desc *start_tx;
2118 struct ring_desc* prev_tx; 2099 struct ring_desc *prev_tx;
2119 struct nv_skb_map* prev_tx_ctx; 2100 struct nv_skb_map *prev_tx_ctx;
2120 unsigned long flags; 2101 unsigned long flags;
2121 2102
2122 /* add fragments to entries count */ 2103 /* add fragments to entries count */
@@ -2204,18 +2185,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2204 2185
2205 spin_unlock_irqrestore(&np->lock, flags); 2186 spin_unlock_irqrestore(&np->lock, flags);
2206 2187
2207 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
2208 dev->name, entries, tx_flags_extra);
2209 {
2210 int j;
2211 for (j=0; j<64; j++) {
2212 if ((j%16) == 0)
2213 dprintk("\n%03x:", j);
2214 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2215 }
2216 dprintk("\n");
2217 }
2218
2219 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2188 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2220 return NETDEV_TX_OK; 2189 return NETDEV_TX_OK;
2221} 2190}
@@ -2233,11 +2202,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2233 u32 size = skb_headlen(skb); 2202 u32 size = skb_headlen(skb);
2234 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2203 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2235 u32 empty_slots; 2204 u32 empty_slots;
2236 struct ring_desc_ex* put_tx; 2205 struct ring_desc_ex *put_tx;
2237 struct ring_desc_ex* start_tx; 2206 struct ring_desc_ex *start_tx;
2238 struct ring_desc_ex* prev_tx; 2207 struct ring_desc_ex *prev_tx;
2239 struct nv_skb_map* prev_tx_ctx; 2208 struct nv_skb_map *prev_tx_ctx;
2240 struct nv_skb_map* start_tx_ctx; 2209 struct nv_skb_map *start_tx_ctx;
2241 unsigned long flags; 2210 unsigned long flags;
2242 2211
2243 /* add fragments to entries count */ 2212 /* add fragments to entries count */
@@ -2355,18 +2324,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2355 2324
2356 spin_unlock_irqrestore(&np->lock, flags); 2325 spin_unlock_irqrestore(&np->lock, flags);
2357 2326
2358 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
2359 dev->name, entries, tx_flags_extra);
2360 {
2361 int j;
2362 for (j=0; j<64; j++) {
2363 if ((j%16) == 0)
2364 dprintk("\n%03x:", j);
2365 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2366 }
2367 dprintk("\n");
2368 }
2369
2370 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2327 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2371 return NETDEV_TX_OK; 2328 return NETDEV_TX_OK;
2372} 2329}
@@ -2399,15 +2356,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
2399 struct fe_priv *np = netdev_priv(dev); 2356 struct fe_priv *np = netdev_priv(dev);
2400 u32 flags; 2357 u32 flags;
2401 int tx_work = 0; 2358 int tx_work = 0;
2402 struct ring_desc* orig_get_tx = np->get_tx.orig; 2359 struct ring_desc *orig_get_tx = np->get_tx.orig;
2403 2360
2404 while ((np->get_tx.orig != np->put_tx.orig) && 2361 while ((np->get_tx.orig != np->put_tx.orig) &&
2405 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && 2362 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2406 (tx_work < limit)) { 2363 (tx_work < limit)) {
2407 2364
2408 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
2409 dev->name, flags);
2410
2411 nv_unmap_txskb(np, np->get_tx_ctx); 2365 nv_unmap_txskb(np, np->get_tx_ctx);
2412 2366
2413 if (np->desc_ver == DESC_VER_1) { 2367 if (np->desc_ver == DESC_VER_1) {
@@ -2464,15 +2418,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2464 struct fe_priv *np = netdev_priv(dev); 2418 struct fe_priv *np = netdev_priv(dev);
2465 u32 flags; 2419 u32 flags;
2466 int tx_work = 0; 2420 int tx_work = 0;
2467 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2421 struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2468 2422
2469 while ((np->get_tx.ex != np->put_tx.ex) && 2423 while ((np->get_tx.ex != np->put_tx.ex) &&
2470 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && 2424 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2471 (tx_work < limit)) { 2425 (tx_work < limit)) {
2472 2426
2473 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2474 dev->name, flags);
2475
2476 nv_unmap_txskb(np, np->get_tx_ctx); 2427 nv_unmap_txskb(np, np->get_tx_ctx);
2477 2428
2478 if (flags & NV_TX2_LASTPACKET) { 2429 if (flags & NV_TX2_LASTPACKET) {
@@ -2491,9 +2442,8 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2491 np->get_tx_ctx->skb = NULL; 2442 np->get_tx_ctx->skb = NULL;
2492 tx_work++; 2443 tx_work++;
2493 2444
2494 if (np->tx_limit) { 2445 if (np->tx_limit)
2495 nv_tx_flip_ownership(dev); 2446 nv_tx_flip_ownership(dev);
2496 }
2497 } 2447 }
2498 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2448 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2499 np->get_tx.ex = np->first_tx.ex; 2449 np->get_tx.ex = np->first_tx.ex;
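The `if (np->tx_limit)` change above is the recurring single-statement brace cleanup; the same edit shows up in the rx framing-error branches and in set_msix_vector_map() further down. The rule from Documentation/CodingStyle, using the driver's own lines as the example:

        /* before: checkpatch.pl warns about braces around a single statement */
        if (np->tx_limit) {
                nv_tx_flip_ownership(dev);
        }

        /* after: drop the braces when the body is exactly one statement */
        if (np->tx_limit)
                nv_tx_flip_ownership(dev);

(Multi-statement bodies, and conditionals where another branch still needs braces, keep them.)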
@@ -2518,57 +2468,56 @@ static void nv_tx_timeout(struct net_device *dev)
2518 u32 status; 2468 u32 status;
2519 union ring_type put_tx; 2469 union ring_type put_tx;
2520 int saved_tx_limit; 2470 int saved_tx_limit;
2471 int i;
2521 2472
2522 if (np->msi_flags & NV_MSI_X_ENABLED) 2473 if (np->msi_flags & NV_MSI_X_ENABLED)
2523 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2474 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2524 else 2475 else
2525 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2476 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2526 2477
2527 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); 2478 netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
2528 2479
2529 { 2480 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2530 int i; 2481 netdev_info(dev, "Dumping tx registers\n");
2531 2482 for (i = 0; i <= np->register_size; i += 32) {
2532 printk(KERN_INFO "%s: Ring at %lx\n", 2483 netdev_info(dev,
2533 dev->name, (unsigned long)np->ring_addr); 2484 "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2534 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 2485 i,
2535 for (i=0;i<=np->register_size;i+= 32) { 2486 readl(base + i + 0), readl(base + i + 4),
2536 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 2487 readl(base + i + 8), readl(base + i + 12),
2537 i, 2488 readl(base + i + 16), readl(base + i + 20),
2538 readl(base + i + 0), readl(base + i + 4), 2489 readl(base + i + 24), readl(base + i + 28));
2539 readl(base + i + 8), readl(base + i + 12), 2490 }
2540 readl(base + i + 16), readl(base + i + 20), 2491 netdev_info(dev, "Dumping tx ring\n");
2541 readl(base + i + 24), readl(base + i + 28)); 2492 for (i = 0; i < np->tx_ring_size; i += 4) {
2542 } 2493 if (!nv_optimized(np)) {
2543 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2494 netdev_info(dev,
2544 for (i=0;i<np->tx_ring_size;i+= 4) { 2495 "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2545 if (!nv_optimized(np)) { 2496 i,
2546 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2497 le32_to_cpu(np->tx_ring.orig[i].buf),
2547 i, 2498 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2548 le32_to_cpu(np->tx_ring.orig[i].buf), 2499 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2549 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2500 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2550 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2501 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2551 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2502 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2552 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2503 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2553 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2504 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2554 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2505 } else {
2555 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2506 netdev_info(dev,
2556 } else { 2507 "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2557 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 2508 i,
2558 i, 2509 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2559 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2510 le32_to_cpu(np->tx_ring.ex[i].buflow),
2560 le32_to_cpu(np->tx_ring.ex[i].buflow), 2511 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2561 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2512 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2562 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2513 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2563 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2514 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2564 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2515 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2565 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2516 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2566 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2517 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2567 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2518 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2568 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2519 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2569 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2520 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2570 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2571 }
2572 } 2521 }
2573 } 2522 }
2574 2523
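This is the core of the conversion in nv_tx_timeout(): every printk(KERN_INFO "%s: ...", dev->name, ...) becomes netdev_info(dev, ...), which supplies the device prefix itself, so the explicit dev->name argument disappears and the long register/ring dumps reflow under the 80-column limit. The before/after shape, taken from the first line of the hunk:

        /* before */
        printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

        /* after: the interface-name prefix comes from netdev_info() itself */
        netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);

The same substitution accounts for most of the remaining hunks (link up/down, pause-frame errors, phy reset failures, nv_do_nic_poll and the loopback test).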
@@ -2616,15 +2565,13 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2616 int protolen; /* length as stored in the proto field */ 2565 int protolen; /* length as stored in the proto field */
2617 2566
2618 /* 1) calculate len according to header */ 2567 /* 1) calculate len according to header */
2619 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2568 if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2620 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 2569 protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2621 hdrlen = VLAN_HLEN; 2570 hdrlen = VLAN_HLEN;
2622 } else { 2571 } else {
2623 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 2572 protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2624 hdrlen = ETH_HLEN; 2573 hdrlen = ETH_HLEN;
2625 } 2574 }
2626 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2627 dev->name, datalen, protolen, hdrlen);
2628 if (protolen > ETH_DATA_LEN) 2575 if (protolen > ETH_DATA_LEN)
2629 return datalen; /* Value in proto field not a len, no checks possible */ 2576 return datalen; /* Value in proto field not a len, no checks possible */
2630 2577
@@ -2635,26 +2582,18 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2635 /* more data on wire than in 802 header, trim of 2582 /* more data on wire than in 802 header, trim of
2636 * additional data. 2583 * additional data.
2637 */ 2584 */
2638 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2639 dev->name, protolen);
2640 return protolen; 2585 return protolen;
2641 } else { 2586 } else {
2642 /* less data on wire than mentioned in header. 2587 /* less data on wire than mentioned in header.
2643 * Discard the packet. 2588 * Discard the packet.
2644 */ 2589 */
2645 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2646 dev->name);
2647 return -1; 2590 return -1;
2648 } 2591 }
2649 } else { 2592 } else {
2650 /* short packet. Accept only if 802 values are also short */ 2593 /* short packet. Accept only if 802 values are also short */
2651 if (protolen > ETH_ZLEN) { 2594 if (protolen > ETH_ZLEN) {
2652 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2653 dev->name);
2654 return -1; 2595 return -1;
2655 } 2596 }
2656 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2657 dev->name, datalen);
2658 return datalen; 2597 return datalen;
2659 } 2598 }
2660} 2599}
@@ -2667,13 +2606,10 @@ static int nv_rx_process(struct net_device *dev, int limit)
2667 struct sk_buff *skb; 2606 struct sk_buff *skb;
2668 int len; 2607 int len;
2669 2608
2670 while((np->get_rx.orig != np->put_rx.orig) && 2609 while ((np->get_rx.orig != np->put_rx.orig) &&
2671 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2610 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2672 (rx_work < limit)) { 2611 (rx_work < limit)) {
2673 2612
2674 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2675 dev->name, flags);
2676
2677 /* 2613 /*
2678 * the packet is for us - immediately tear down the pci mapping. 2614 * the packet is for us - immediately tear down the pci mapping.
2679 * TODO: check if a prefetch of the first cacheline improves 2615 * TODO: check if a prefetch of the first cacheline improves
@@ -2685,16 +2621,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2685 skb = np->get_rx_ctx->skb; 2621 skb = np->get_rx_ctx->skb;
2686 np->get_rx_ctx->skb = NULL; 2622 np->get_rx_ctx->skb = NULL;
2687 2623
2688 {
2689 int j;
2690 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2691 for (j=0; j<64; j++) {
2692 if ((j%16) == 0)
2693 dprintk("\n%03x:", j);
2694 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2695 }
2696 dprintk("\n");
2697 }
2698 /* look at what we actually got: */ 2624 /* look at what we actually got: */
2699 if (np->desc_ver == DESC_VER_1) { 2625 if (np->desc_ver == DESC_VER_1) {
2700 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2626 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
@@ -2710,9 +2636,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
2710 } 2636 }
2711 /* framing errors are soft errors */ 2637 /* framing errors are soft errors */
2712 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { 2638 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2713 if (flags & NV_RX_SUBSTRACT1) { 2639 if (flags & NV_RX_SUBSTRACT1)
2714 len--; 2640 len--;
2715 }
2716 } 2641 }
2717 /* the rest are hard errors */ 2642 /* the rest are hard errors */
2718 else { 2643 else {
@@ -2745,9 +2670,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
2745 } 2670 }
2746 /* framing errors are soft errors */ 2671 /* framing errors are soft errors */
2747 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2672 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2748 if (flags & NV_RX2_SUBSTRACT1) { 2673 if (flags & NV_RX2_SUBSTRACT1)
2749 len--; 2674 len--;
2750 }
2751 } 2675 }
2752 /* the rest are hard errors */ 2676 /* the rest are hard errors */
2753 else { 2677 else {
@@ -2771,8 +2695,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2771 /* got a valid packet - forward it to the network core */ 2695 /* got a valid packet - forward it to the network core */
2772 skb_put(skb, len); 2696 skb_put(skb, len);
2773 skb->protocol = eth_type_trans(skb, dev); 2697 skb->protocol = eth_type_trans(skb, dev);
2774 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2775 dev->name, len, skb->protocol);
2776 napi_gro_receive(&np->napi, skb); 2698 napi_gro_receive(&np->napi, skb);
2777 dev->stats.rx_packets++; 2699 dev->stats.rx_packets++;
2778 dev->stats.rx_bytes += len; 2700 dev->stats.rx_bytes += len;
@@ -2797,13 +2719,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2797 struct sk_buff *skb; 2719 struct sk_buff *skb;
2798 int len; 2720 int len;
2799 2721
2800 while((np->get_rx.ex != np->put_rx.ex) && 2722 while ((np->get_rx.ex != np->put_rx.ex) &&
2801 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2723 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2802 (rx_work < limit)) { 2724 (rx_work < limit)) {
2803 2725
2804 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2805 dev->name, flags);
2806
2807 /* 2726 /*
2808 * the packet is for us - immediately tear down the pci mapping. 2727 * the packet is for us - immediately tear down the pci mapping.
2809 * TODO: check if a prefetch of the first cacheline improves 2728 * TODO: check if a prefetch of the first cacheline improves
@@ -2815,16 +2734,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2815 skb = np->get_rx_ctx->skb; 2734 skb = np->get_rx_ctx->skb;
2816 np->get_rx_ctx->skb = NULL; 2735 np->get_rx_ctx->skb = NULL;
2817 2736
2818 {
2819 int j;
2820 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2821 for (j=0; j<64; j++) {
2822 if ((j%16) == 0)
2823 dprintk("\n%03x:", j);
2824 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2825 }
2826 dprintk("\n");
2827 }
2828 /* look at what we actually got: */ 2737 /* look at what we actually got: */
2829 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2738 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2830 len = flags & LEN_MASK_V2; 2739 len = flags & LEN_MASK_V2;
@@ -2838,9 +2747,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2838 } 2747 }
2839 /* framing errors are soft errors */ 2748 /* framing errors are soft errors */
2840 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2749 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2841 if (flags & NV_RX2_SUBSTRACT1) { 2750 if (flags & NV_RX2_SUBSTRACT1)
2842 len--; 2751 len--;
2843 }
2844 } 2752 }
2845 /* the rest are hard errors */ 2753 /* the rest are hard errors */
2846 else { 2754 else {
@@ -2858,9 +2766,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2858 skb->protocol = eth_type_trans(skb, dev); 2766 skb->protocol = eth_type_trans(skb, dev);
2859 prefetch(skb->data); 2767 prefetch(skb->data);
2860 2768
2861 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2862 dev->name, len, skb->protocol);
2863
2864 if (likely(!np->vlangrp)) { 2769 if (likely(!np->vlangrp)) {
2865 napi_gro_receive(&np->napi, skb); 2770 napi_gro_receive(&np->napi, skb);
2866 } else { 2771 } else {
@@ -2949,7 +2854,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
2949 /* reinit nic view of the rx queue */ 2854 /* reinit nic view of the rx queue */
2950 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2855 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2951 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2856 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2952 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2857 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2953 base + NvRegRingSizes); 2858 base + NvRegRingSizes);
2954 pci_push(base); 2859 pci_push(base);
2955 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2860 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -2986,7 +2891,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev)
2986static int nv_set_mac_address(struct net_device *dev, void *addr) 2891static int nv_set_mac_address(struct net_device *dev, void *addr)
2987{ 2892{
2988 struct fe_priv *np = netdev_priv(dev); 2893 struct fe_priv *np = netdev_priv(dev);
2989 struct sockaddr *macaddr = (struct sockaddr*)addr; 2894 struct sockaddr *macaddr = (struct sockaddr *)addr;
2990 2895
2991 if (!is_valid_ether_addr(macaddr->sa_data)) 2896 if (!is_valid_ether_addr(macaddr->sa_data))
2992 return -EADDRNOTAVAIL; 2897 return -EADDRNOTAVAIL;
@@ -3076,8 +2981,6 @@ static void nv_set_multicast(struct net_device *dev)
3076 writel(mask[0], base + NvRegMulticastMaskA); 2981 writel(mask[0], base + NvRegMulticastMaskA);
3077 writel(mask[1], base + NvRegMulticastMaskB); 2982 writel(mask[1], base + NvRegMulticastMaskB);
3078 writel(pff, base + NvRegPacketFilterFlags); 2983 writel(pff, base + NvRegPacketFilterFlags);
3079 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
3080 dev->name);
3081 nv_start_rx(dev); 2984 nv_start_rx(dev);
3082 spin_unlock_irq(&np->lock); 2985 spin_unlock_irq(&np->lock);
3083} 2986}
@@ -3152,8 +3055,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3152 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3055 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3153 3056
3154 if (!(mii_status & BMSR_LSTATUS)) { 3057 if (!(mii_status & BMSR_LSTATUS)) {
3155 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
3156 dev->name);
3157 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3058 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3158 newdup = 0; 3059 newdup = 0;
3159 retval = 0; 3060 retval = 0;
@@ -3161,8 +3062,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3161 } 3062 }
3162 3063
3163 if (np->autoneg == 0) { 3064 if (np->autoneg == 0) {
3164 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
3165 dev->name, np->fixed_mode);
3166 if (np->fixed_mode & LPA_100FULL) { 3065 if (np->fixed_mode & LPA_100FULL) {
3167 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3066 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3168 newdup = 1; 3067 newdup = 1;
@@ -3185,14 +3084,11 @@ static int nv_update_linkspeed(struct net_device *dev)
3185 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3084 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3186 newdup = 0; 3085 newdup = 0;
3187 retval = 0; 3086 retval = 0;
3188 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
3189 goto set_speed; 3087 goto set_speed;
3190 } 3088 }
3191 3089
3192 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3090 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3193 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 3091 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3194 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
3195 dev->name, adv, lpa);
3196 3092
3197 retval = 1; 3093 retval = 1;
3198 if (np->gigabit == PHY_GIGABIT) { 3094 if (np->gigabit == PHY_GIGABIT) {
@@ -3201,8 +3097,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3201 3097
3202 if ((control_1000 & ADVERTISE_1000FULL) && 3098 if ((control_1000 & ADVERTISE_1000FULL) &&
3203 (status_1000 & LPA_1000FULL)) { 3099 (status_1000 & LPA_1000FULL)) {
3204 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
3205 dev->name);
3206 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 3100 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3207 newdup = 1; 3101 newdup = 1;
3208 goto set_speed; 3102 goto set_speed;
@@ -3224,7 +3118,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3224 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3118 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3225 newdup = 0; 3119 newdup = 0;
3226 } else { 3120 } else {
3227 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
3228 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3121 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3229 newdup = 0; 3122 newdup = 0;
3230 } 3123 }
@@ -3233,9 +3126,6 @@ set_speed:
3233 if (np->duplex == newdup && np->linkspeed == newls) 3126 if (np->duplex == newdup && np->linkspeed == newls)
3234 return retval; 3127 return retval;
3235 3128
3236 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
3237 dev->name, np->linkspeed, np->duplex, newls, newdup);
3238
3239 np->duplex = newdup; 3129 np->duplex = newdup;
3240 np->linkspeed = newls; 3130 np->linkspeed = newls;
3241 3131
@@ -3302,7 +3192,7 @@ set_speed:
3302 } 3192 }
3303 writel(txreg, base + NvRegTxWatermark); 3193 writel(txreg, base + NvRegTxWatermark);
3304 3194
3305 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 3195 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3306 base + NvRegMisc1); 3196 base + NvRegMisc1);
3307 pci_push(base); 3197 pci_push(base);
3308 writel(np->linkspeed, base + NvRegLinkSpeed); 3198 writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -3312,8 +3202,8 @@ set_speed:
3312 /* setup pause frame */ 3202 /* setup pause frame */
3313 if (np->duplex != 0) { 3203 if (np->duplex != 0) {
3314 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3204 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3315 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 3205 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3316 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 3206 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3317 3207
3318 switch (adv_pause) { 3208 switch (adv_pause) {
3319 case ADVERTISE_PAUSE_CAP: 3209 case ADVERTISE_PAUSE_CAP:
@@ -3324,22 +3214,17 @@ set_speed:
3324 } 3214 }
3325 break; 3215 break;
3326 case ADVERTISE_PAUSE_ASYM: 3216 case ADVERTISE_PAUSE_ASYM:
3327 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 3217 if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3328 {
3329 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3218 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3330 }
3331 break; 3219 break;
3332 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 3220 case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3333 if (lpa_pause & LPA_PAUSE_CAP) 3221 if (lpa_pause & LPA_PAUSE_CAP) {
3334 {
3335 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3222 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3336 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3223 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3337 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3224 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3338 } 3225 }
3339 if (lpa_pause == LPA_PAUSE_ASYM) 3226 if (lpa_pause == LPA_PAUSE_ASYM)
3340 {
3341 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3227 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3342 }
3343 break; 3228 break;
3344 } 3229 }
3345 } else { 3230 } else {
@@ -3361,14 +3246,14 @@ static void nv_linkchange(struct net_device *dev)
3361 if (nv_update_linkspeed(dev)) { 3246 if (nv_update_linkspeed(dev)) {
3362 if (!netif_carrier_ok(dev)) { 3247 if (!netif_carrier_ok(dev)) {
3363 netif_carrier_on(dev); 3248 netif_carrier_on(dev);
3364 printk(KERN_INFO "%s: link up.\n", dev->name); 3249 netdev_info(dev, "link up\n");
3365 nv_txrx_gate(dev, false); 3250 nv_txrx_gate(dev, false);
3366 nv_start_rx(dev); 3251 nv_start_rx(dev);
3367 } 3252 }
3368 } else { 3253 } else {
3369 if (netif_carrier_ok(dev)) { 3254 if (netif_carrier_ok(dev)) {
3370 netif_carrier_off(dev); 3255 netif_carrier_off(dev);
3371 printk(KERN_INFO "%s: link down.\n", dev->name); 3256 netdev_info(dev, "link down\n");
3372 nv_txrx_gate(dev, true); 3257 nv_txrx_gate(dev, true);
3373 nv_stop_rx(dev); 3258 nv_stop_rx(dev);
3374 } 3259 }
@@ -3382,11 +3267,9 @@ static void nv_link_irq(struct net_device *dev)
3382 3267
3383 miistat = readl(base + NvRegMIIStatus); 3268 miistat = readl(base + NvRegMIIStatus);
3384 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); 3269 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3385 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
3386 3270
3387 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 3271 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3388 nv_linkchange(dev); 3272 nv_linkchange(dev);
3389 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
3390} 3273}
3391 3274
3392static void nv_msi_workaround(struct fe_priv *np) 3275static void nv_msi_workaround(struct fe_priv *np)
@@ -3437,8 +3320,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3437 struct fe_priv *np = netdev_priv(dev); 3320 struct fe_priv *np = netdev_priv(dev);
3438 u8 __iomem *base = get_hwbase(dev); 3321 u8 __iomem *base = get_hwbase(dev);
3439 3322
3440 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3441
3442 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3323 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3443 np->events = readl(base + NvRegIrqStatus); 3324 np->events = readl(base + NvRegIrqStatus);
3444 writel(np->events, base + NvRegIrqStatus); 3325 writel(np->events, base + NvRegIrqStatus);
@@ -3446,7 +3327,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3446 np->events = readl(base + NvRegMSIXIrqStatus); 3327 np->events = readl(base + NvRegMSIXIrqStatus);
3447 writel(np->events, base + NvRegMSIXIrqStatus); 3328 writel(np->events, base + NvRegMSIXIrqStatus);
3448 } 3329 }
3449 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
3450 if (!(np->events & np->irqmask)) 3330 if (!(np->events & np->irqmask))
3451 return IRQ_NONE; 3331 return IRQ_NONE;
3452 3332
@@ -3460,8 +3340,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3460 __napi_schedule(&np->napi); 3340 __napi_schedule(&np->napi);
3461 } 3341 }
3462 3342
3463 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3464
3465 return IRQ_HANDLED; 3343 return IRQ_HANDLED;
3466} 3344}
3467 3345
@@ -3476,8 +3354,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3476 struct fe_priv *np = netdev_priv(dev); 3354 struct fe_priv *np = netdev_priv(dev);
3477 u8 __iomem *base = get_hwbase(dev); 3355 u8 __iomem *base = get_hwbase(dev);
3478 3356
3479 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3480
3481 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3357 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3482 np->events = readl(base + NvRegIrqStatus); 3358 np->events = readl(base + NvRegIrqStatus);
3483 writel(np->events, base + NvRegIrqStatus); 3359 writel(np->events, base + NvRegIrqStatus);
@@ -3485,7 +3361,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3485 np->events = readl(base + NvRegMSIXIrqStatus); 3361 np->events = readl(base + NvRegMSIXIrqStatus);
3486 writel(np->events, base + NvRegMSIXIrqStatus); 3362 writel(np->events, base + NvRegMSIXIrqStatus);
3487 } 3363 }
3488 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
3489 if (!(np->events & np->irqmask)) 3364 if (!(np->events & np->irqmask))
3490 return IRQ_NONE; 3365 return IRQ_NONE;
3491 3366
@@ -3498,7 +3373,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3498 writel(0, base + NvRegIrqMask); 3373 writel(0, base + NvRegIrqMask);
3499 __napi_schedule(&np->napi); 3374 __napi_schedule(&np->napi);
3500 } 3375 }
3501 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3502 3376
3503 return IRQ_HANDLED; 3377 return IRQ_HANDLED;
3504} 3378}
@@ -3512,12 +3386,9 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3512 int i; 3386 int i;
3513 unsigned long flags; 3387 unsigned long flags;
3514 3388
3515 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3389 for (i = 0;; i++) {
3516
3517 for (i=0; ; i++) {
3518 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3390 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3519 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3391 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3520 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3521 if (!(events & np->irqmask)) 3392 if (!(events & np->irqmask))
3522 break; 3393 break;
3523 3394
@@ -3536,12 +3407,12 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3536 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3407 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3537 } 3408 }
3538 spin_unlock_irqrestore(&np->lock, flags); 3409 spin_unlock_irqrestore(&np->lock, flags);
3539 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3410 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3411 __func__, i);
3540 break; 3412 break;
3541 } 3413 }
3542 3414
3543 } 3415 }
3544 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3545 3416
3546 return IRQ_RETVAL(i); 3417 return IRQ_RETVAL(i);
3547} 3418}
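In the per-vector handlers the leftover KERN_DEBUG printk becomes netdev_dbg() and the hard-coded function name is replaced with __func__, so the string survives renames:

        netdev_dbg(dev, "%s: too many iterations (%d)\n", __func__, i);

One behavioural note, as far as I can tell: unlike the old printk(KERN_DEBUG ...), netdev_dbg() is compiled out unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG is enabled; with dynamic debug, something like `echo 'module forcedeth +p' > /sys/kernel/debug/dynamic_debug/control` (assuming debugfs is mounted there) turns the messages back on at runtime. The identical change repeats in nv_nic_irq_rx() and nv_nic_irq_other() below.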
@@ -3553,7 +3424,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3553 u8 __iomem *base = get_hwbase(dev); 3424 u8 __iomem *base = get_hwbase(dev);
3554 unsigned long flags; 3425 unsigned long flags;
3555 int retcode; 3426 int retcode;
3556 int rx_count, tx_work=0, rx_work=0; 3427 int rx_count, tx_work = 0, rx_work = 0;
3557 3428
3558 do { 3429 do {
3559 if (!nv_optimized(np)) { 3430 if (!nv_optimized(np)) {
@@ -3626,12 +3497,9 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3626 int i; 3497 int i;
3627 unsigned long flags; 3498 unsigned long flags;
3628 3499
3629 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3500 for (i = 0;; i++) {
3630
3631 for (i=0; ; i++) {
3632 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3501 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3633 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3502 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3634 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3635 if (!(events & np->irqmask)) 3503 if (!(events & np->irqmask))
3636 break; 3504 break;
3637 3505
@@ -3655,11 +3523,11 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3655 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3523 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3656 } 3524 }
3657 spin_unlock_irqrestore(&np->lock, flags); 3525 spin_unlock_irqrestore(&np->lock, flags);
3658 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3526 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3527 __func__, i);
3659 break; 3528 break;
3660 } 3529 }
3661 } 3530 }
3662 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3663 3531
3664 return IRQ_RETVAL(i); 3532 return IRQ_RETVAL(i);
3665} 3533}
@@ -3673,12 +3541,9 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
3673 int i; 3541 int i;
3674 unsigned long flags; 3542 unsigned long flags;
3675 3543
3676 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3544 for (i = 0;; i++) {
3677
3678 for (i=0; ; i++) {
3679 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3545 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3680 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3546 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3681 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3682 if (!(events & np->irqmask)) 3547 if (!(events & np->irqmask))
3683 break; 3548 break;
3684 3549
@@ -3723,12 +3588,12 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
3723 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3588 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3724 } 3589 }
3725 spin_unlock_irqrestore(&np->lock, flags); 3590 spin_unlock_irqrestore(&np->lock, flags);
3726 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 3591 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3592 __func__, i);
3727 break; 3593 break;
3728 } 3594 }
3729 3595
3730 } 3596 }
3731 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3732 3597
3733 return IRQ_RETVAL(i); 3598 return IRQ_RETVAL(i);
3734} 3599}
@@ -3740,8 +3605,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
3740 u8 __iomem *base = get_hwbase(dev); 3605 u8 __iomem *base = get_hwbase(dev);
3741 u32 events; 3606 u32 events;
3742 3607
3743 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3744
3745 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3608 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3746 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3609 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3747 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3610 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
@@ -3750,7 +3613,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
3750 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3613 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3751 } 3614 }
3752 pci_push(base); 3615 pci_push(base);
3753 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3754 if (!(events & NVREG_IRQ_TIMER)) 3616 if (!(events & NVREG_IRQ_TIMER))
3755 return IRQ_RETVAL(0); 3617 return IRQ_RETVAL(0);
3756 3618
@@ -3760,8 +3622,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
3760 np->intr_test = 1; 3622 np->intr_test = 1;
3761 spin_unlock(&np->lock); 3623 spin_unlock(&np->lock);
3762 3624
3763 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3764
3765 return IRQ_RETVAL(1); 3625 return IRQ_RETVAL(1);
3766} 3626}
3767 3627
@@ -3776,17 +3636,15 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3776 * the remaining 8 interrupts. 3636 * the remaining 8 interrupts.
3777 */ 3637 */
3778 for (i = 0; i < 8; i++) { 3638 for (i = 0; i < 8; i++) {
3779 if ((irqmask >> i) & 0x1) { 3639 if ((irqmask >> i) & 0x1)
3780 msixmap |= vector << (i << 2); 3640 msixmap |= vector << (i << 2);
3781 }
3782 } 3641 }
3783 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3642 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3784 3643
3785 msixmap = 0; 3644 msixmap = 0;
3786 for (i = 0; i < 8; i++) { 3645 for (i = 0; i < 8; i++) {
3787 if ((irqmask >> (i + 8)) & 0x1) { 3646 if ((irqmask >> (i + 8)) & 0x1)
3788 msixmap |= vector << (i << 2); 3647 msixmap |= vector << (i << 2);
3789 }
3790 } 3648 }
3791 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3649 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3792} 3650}
@@ -3809,17 +3667,19 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3809 } 3667 }
3810 3668
3811 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3669 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3812 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3670 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3813 np->msi_x_entry[i].entry = i; 3671 np->msi_x_entry[i].entry = i;
3814 } 3672 ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
3815 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3673 if (ret == 0) {
3816 np->msi_flags |= NV_MSI_X_ENABLED; 3674 np->msi_flags |= NV_MSI_X_ENABLED;
3817 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3675 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3818 /* Request irq for rx handling */ 3676 /* Request irq for rx handling */
3819 sprintf(np->name_rx, "%s-rx", dev->name); 3677 sprintf(np->name_rx, "%s-rx", dev->name);
3820 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 3678 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3821 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { 3679 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
3822 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3680 netdev_info(dev,
3681 "request_irq failed for rx %d\n",
3682 ret);
3823 pci_disable_msix(np->pci_dev); 3683 pci_disable_msix(np->pci_dev);
3824 np->msi_flags &= ~NV_MSI_X_ENABLED; 3684 np->msi_flags &= ~NV_MSI_X_ENABLED;
3825 goto out_err; 3685 goto out_err;
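Two cleanups meet in this hunk: the single-statement for loop loses its braces, and the MSI-X enable no longer hides an assignment inside the if condition, which checkpatch.pl reports as an error. The reshaped call, with nvec standing in for (np->msi_flags & NV_MSI_X_VECTORS_MASK) from the lines above:

        /* before */
        if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, nvec)) == 0)
                np->msi_flags |= NV_MSI_X_ENABLED;

        /* after: one call, one test */
        ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, nvec);
        if (ret == 0)
                np->msi_flags |= NV_MSI_X_ENABLED;

The plain-MSI fallback a few hunks down gets the same treatment with pci_enable_msi().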
@@ -3828,7 +3688,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3828 sprintf(np->name_tx, "%s-tx", dev->name); 3688 sprintf(np->name_tx, "%s-tx", dev->name);
3829 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 3689 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3830 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { 3690 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
3831 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3691 netdev_info(dev,
3692 "request_irq failed for tx %d\n",
3693 ret);
3832 pci_disable_msix(np->pci_dev); 3694 pci_disable_msix(np->pci_dev);
3833 np->msi_flags &= ~NV_MSI_X_ENABLED; 3695 np->msi_flags &= ~NV_MSI_X_ENABLED;
3834 goto out_free_rx; 3696 goto out_free_rx;
@@ -3837,7 +3699,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3837 sprintf(np->name_other, "%s-other", dev->name); 3699 sprintf(np->name_other, "%s-other", dev->name);
3838 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 3700 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3839 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { 3701 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
3840 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3702 netdev_info(dev,
3703 "request_irq failed for link %d\n",
3704 ret);
3841 pci_disable_msix(np->pci_dev); 3705 pci_disable_msix(np->pci_dev);
3842 np->msi_flags &= ~NV_MSI_X_ENABLED; 3706 np->msi_flags &= ~NV_MSI_X_ENABLED;
3843 goto out_free_tx; 3707 goto out_free_tx;
@@ -3851,7 +3715,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3851 } else { 3715 } else {
3852 /* Request irq for all interrupts */ 3716 /* Request irq for all interrupts */
3853 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 3717 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3854 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3718 netdev_info(dev,
3719 "request_irq failed %d\n",
3720 ret);
3855 pci_disable_msix(np->pci_dev); 3721 pci_disable_msix(np->pci_dev);
3856 np->msi_flags &= ~NV_MSI_X_ENABLED; 3722 np->msi_flags &= ~NV_MSI_X_ENABLED;
3857 goto out_err; 3723 goto out_err;
@@ -3864,11 +3730,13 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3864 } 3730 }
3865 } 3731 }
3866 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3732 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3867 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3733 ret = pci_enable_msi(np->pci_dev);
3734 if (ret == 0) {
3868 np->msi_flags |= NV_MSI_ENABLED; 3735 np->msi_flags |= NV_MSI_ENABLED;
3869 dev->irq = np->pci_dev->irq; 3736 dev->irq = np->pci_dev->irq;
3870 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3737 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3871 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3738 netdev_info(dev, "request_irq failed %d\n",
3739 ret);
3872 pci_disable_msi(np->pci_dev); 3740 pci_disable_msi(np->pci_dev);
3873 np->msi_flags &= ~NV_MSI_ENABLED; 3741 np->msi_flags &= ~NV_MSI_ENABLED;
3874 dev->irq = np->pci_dev->irq; 3742 dev->irq = np->pci_dev->irq;
@@ -3903,9 +3771,8 @@ static void nv_free_irq(struct net_device *dev)
3903 int i; 3771 int i;
3904 3772
3905 if (np->msi_flags & NV_MSI_X_ENABLED) { 3773 if (np->msi_flags & NV_MSI_X_ENABLED) {
3906 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3774 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3907 free_irq(np->msi_x_entry[i].vector, dev); 3775 free_irq(np->msi_x_entry[i].vector, dev);
3908 }
3909 pci_disable_msix(np->pci_dev); 3776 pci_disable_msix(np->pci_dev);
3910 np->msi_flags &= ~NV_MSI_X_ENABLED; 3777 np->msi_flags &= ~NV_MSI_X_ENABLED;
3911 } else { 3778 } else {
@@ -3954,7 +3821,7 @@ static void nv_do_nic_poll(unsigned long data)
3954 3821
3955 if (np->recover_error) { 3822 if (np->recover_error) {
3956 np->recover_error = 0; 3823 np->recover_error = 0;
3957 printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name); 3824 netdev_info(dev, "MAC in recoverable error state\n");
3958 if (netif_running(dev)) { 3825 if (netif_running(dev)) {
3959 netif_tx_lock_bh(dev); 3826 netif_tx_lock_bh(dev);
3960 netif_addr_lock(dev); 3827 netif_addr_lock(dev);
@@ -3975,7 +3842,7 @@ static void nv_do_nic_poll(unsigned long data)
3975 /* reinit nic view of the rx queue */ 3842 /* reinit nic view of the rx queue */
3976 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3843 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3977 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3844 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3978 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3845 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3979 base + NvRegRingSizes); 3846 base + NvRegRingSizes);
3980 pci_push(base); 3847 pci_push(base);
3981 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3848 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4105,7 +3972,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4105 } 3972 }
4106 3973
4107 if (netif_carrier_ok(dev)) { 3974 if (netif_carrier_ok(dev)) {
4108 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 3975 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4109 case NVREG_LINKSPEED_10: 3976 case NVREG_LINKSPEED_10:
4110 ecmd->speed = SPEED_10; 3977 ecmd->speed = SPEED_10;
4111 break; 3978 break;
@@ -4250,14 +4117,14 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4250 } 4117 }
4251 4118
4252 if (netif_running(dev)) 4119 if (netif_running(dev))
4253 printk(KERN_INFO "%s: link down.\n", dev->name); 4120 netdev_info(dev, "link down\n");
4254 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4121 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4255 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4122 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4256 bmcr |= BMCR_ANENABLE; 4123 bmcr |= BMCR_ANENABLE;
4257 /* reset the phy in order for settings to stick, 4124 /* reset the phy in order for settings to stick,
4258 * and cause autoneg to start */ 4125 * and cause autoneg to start */
4259 if (phy_reset(dev, bmcr)) { 4126 if (phy_reset(dev, bmcr)) {
4260 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4127 netdev_info(dev, "phy reset failed\n");
4261 return -EINVAL; 4128 return -EINVAL;
4262 } 4129 }
4263 } else { 4130 } else {
@@ -4306,7 +4173,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4306 if (np->phy_oui == PHY_OUI_MARVELL) { 4173 if (np->phy_oui == PHY_OUI_MARVELL) {
4307 /* reset the phy in order for forced mode settings to stick */ 4174 /* reset the phy in order for forced mode settings to stick */
4308 if (phy_reset(dev, bmcr)) { 4175 if (phy_reset(dev, bmcr)) {
4309 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4176 netdev_info(dev, "phy reset failed\n");
4310 return -EINVAL; 4177 return -EINVAL;
4311 } 4178 }
4312 } else { 4179 } else {
@@ -4344,7 +4211,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
4344 4211
4345 regs->version = FORCEDETH_REGS_VER; 4212 regs->version = FORCEDETH_REGS_VER;
4346 spin_lock_irq(&np->lock); 4213 spin_lock_irq(&np->lock);
4347 for (i = 0;i <= np->register_size/sizeof(u32); i++) 4214 for (i = 0; i <= np->register_size/sizeof(u32); i++)
4348 rbuf[i] = readl(base + i*sizeof(u32)); 4215 rbuf[i] = readl(base + i*sizeof(u32));
4349 spin_unlock_irq(&np->lock); 4216 spin_unlock_irq(&np->lock);
4350} 4217}
@@ -4368,7 +4235,7 @@ static int nv_nway_reset(struct net_device *dev)
4368 spin_unlock(&np->lock); 4235 spin_unlock(&np->lock);
4369 netif_addr_unlock(dev); 4236 netif_addr_unlock(dev);
4370 netif_tx_unlock_bh(dev); 4237 netif_tx_unlock_bh(dev);
4371 printk(KERN_INFO "%s: link down.\n", dev->name); 4238 netdev_info(dev, "link down\n");
4372 } 4239 }
4373 4240
4374 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4241 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -4376,7 +4243,7 @@ static int nv_nway_reset(struct net_device *dev)
4376 bmcr |= BMCR_ANENABLE; 4243 bmcr |= BMCR_ANENABLE;
4377 /* reset the phy in order for settings to stick*/ 4244 /* reset the phy in order for settings to stick*/
4378 if (phy_reset(dev, bmcr)) { 4245 if (phy_reset(dev, bmcr)) {
4379 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4246 netdev_info(dev, "phy reset failed\n");
4380 return -EINVAL; 4247 return -EINVAL;
4381 } 4248 }
4382 } else { 4249 } else {
@@ -4464,10 +4331,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4464 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4331 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4465 rxtx_ring, ring_addr); 4332 rxtx_ring, ring_addr);
4466 } 4333 }
4467 if (rx_skbuff) 4334
4468 kfree(rx_skbuff); 4335 kfree(rx_skbuff);
4469 if (tx_skbuff) 4336 kfree(tx_skbuff);
4470 kfree(tx_skbuff);
4471 goto exit; 4337 goto exit;
4472 } 4338 }
4473 4339
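The error path above drops the `if (rx_skbuff)` and `if (tx_skbuff)` guards because kfree(NULL) is defined to do nothing, so freeing partially-allocated buffers unconditionally is safe. A self-contained sketch of the idiom (ring_bufs and its fields are made up for illustration, this is not driver code):

#include <linux/errno.h>
#include <linux/slab.h>

struct ring_bufs {
        void *rx;
        void *tx;
        size_t rx_n;
        size_t tx_n;
};

static int ring_bufs_alloc(struct ring_bufs *p, size_t elem_size)
{
        p->rx = kcalloc(p->rx_n, elem_size, GFP_KERNEL);
        p->tx = kcalloc(p->tx_n, elem_size, GFP_KERNEL);
        if (!p->rx || !p->tx) {
                kfree(p->rx);   /* kfree(NULL) is a no-op, no guard needed */
                kfree(p->tx);
                return -ENOMEM;
        }
        return 0;
}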
@@ -4491,14 +4357,14 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4491 np->tx_ring_size = ring->tx_pending; 4357 np->tx_ring_size = ring->tx_pending;
4492 4358
4493 if (!nv_optimized(np)) { 4359 if (!nv_optimized(np)) {
4494 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4360 np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4495 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4361 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4496 } else { 4362 } else {
4497 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4363 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4498 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4364 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4499 } 4365 }
4500 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4366 np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4501 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4367 np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4502 np->ring_addr = ring_addr; 4368 np->ring_addr = ring_addr;
4503 4369
4504 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4370 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
@@ -4515,7 +4381,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4515 /* reinit nic view of the queues */ 4381 /* reinit nic view of the queues */
4516 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4382 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4517 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4383 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4518 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4384 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4519 base + NvRegRingSizes); 4385 base + NvRegRingSizes);
4520 pci_push(base); 4386 pci_push(base);
4521 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4387 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4550,12 +4416,11 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4550 4416
4551 if ((!np->autoneg && np->duplex == 0) || 4417 if ((!np->autoneg && np->duplex == 0) ||
4552 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4418 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4553 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 4419 netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
4554 dev->name);
4555 return -EINVAL; 4420 return -EINVAL;
4556 } 4421 }
4557 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4422 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4558 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4423 netdev_info(dev, "hardware does not support tx pause frames\n");
4559 return -EINVAL; 4424 return -EINVAL;
4560 } 4425 }
4561 4426
@@ -4590,7 +4455,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4590 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4455 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4591 4456
4592 if (netif_running(dev)) 4457 if (netif_running(dev))
4593 printk(KERN_INFO "%s: link down.\n", dev->name); 4458 netdev_info(dev, "link down\n");
4594 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4459 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4595 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4460 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4596 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4461 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
@@ -4841,7 +4706,7 @@ static int nv_loopback_test(struct net_device *dev)
4841 /* reinit nic view of the rx queue */ 4706 /* reinit nic view of the rx queue */
4842 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4707 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4843 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4708 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4844 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4709 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4845 base + NvRegRingSizes); 4710 base + NvRegRingSizes);
4846 pci_push(base); 4711 pci_push(base);
4847 4712
@@ -4852,8 +4717,7 @@ static int nv_loopback_test(struct net_device *dev)
4852 pkt_len = ETH_DATA_LEN; 4717 pkt_len = ETH_DATA_LEN;
4853 tx_skb = dev_alloc_skb(pkt_len); 4718 tx_skb = dev_alloc_skb(pkt_len);
4854 if (!tx_skb) { 4719 if (!tx_skb) {
4855 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 4720 netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
4856 " of %s\n", dev->name);
4857 ret = 0; 4721 ret = 0;
4858 goto out; 4722 goto out;
4859 } 4723 }
@@ -4893,29 +4757,22 @@ static int nv_loopback_test(struct net_device *dev)
4893 if (flags & NV_RX_ERROR) 4757 if (flags & NV_RX_ERROR)
4894 ret = 0; 4758 ret = 0;
4895 } else { 4759 } else {
4896 if (flags & NV_RX2_ERROR) { 4760 if (flags & NV_RX2_ERROR)
4897 ret = 0; 4761 ret = 0;
4898 }
4899 } 4762 }
4900 4763
4901 if (ret) { 4764 if (ret) {
4902 if (len != pkt_len) { 4765 if (len != pkt_len) {
4903 ret = 0; 4766 ret = 0;
4904 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
4905 dev->name, len, pkt_len);
4906 } else { 4767 } else {
4907 rx_skb = np->rx_skb[0].skb; 4768 rx_skb = np->rx_skb[0].skb;
4908 for (i = 0; i < pkt_len; i++) { 4769 for (i = 0; i < pkt_len; i++) {
4909 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4770 if (rx_skb->data[i] != (u8)(i & 0xff)) {
4910 ret = 0; 4771 ret = 0;
4911 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
4912 dev->name, i);
4913 break; 4772 break;
4914 } 4773 }
4915 } 4774 }
4916 } 4775 }
4917 } else {
4918 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
4919 } 4776 }
4920 4777
4921 pci_unmap_single(np->pci_dev, test_dma_addr, 4778 pci_unmap_single(np->pci_dev, test_dma_addr,
@@ -4958,11 +4815,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
4958 netif_addr_lock(dev); 4815 netif_addr_lock(dev);
4959 spin_lock_irq(&np->lock); 4816 spin_lock_irq(&np->lock);
4960 nv_disable_hw_interrupts(dev, np->irqmask); 4817 nv_disable_hw_interrupts(dev, np->irqmask);
4961 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 4818 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4962 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4819 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4963 } else { 4820 else
4964 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4821 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4965 }
4966 /* stop engines */ 4822 /* stop engines */
4967 nv_stop_rxtx(dev); 4823 nv_stop_rxtx(dev);
4968 nv_txrx_reset(dev); 4824 nv_txrx_reset(dev);
@@ -5003,7 +4859,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
5003 /* reinit nic view of the rx queue */ 4859 /* reinit nic view of the rx queue */
5004 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4860 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5005 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4861 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5006 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4862 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5007 base + NvRegRingSizes); 4863 base + NvRegRingSizes);
5008 pci_push(base); 4864 pci_push(base);
5009 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4865 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -5106,8 +4962,7 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
5106 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { 4962 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5107 np->mgmt_sema = 1; 4963 np->mgmt_sema = 1;
5108 return 1; 4964 return 1;
5109 } 4965 } else
5110 else
5111 udelay(50); 4966 udelay(50);
5112 } 4967 }
5113 4968
@@ -5167,8 +5022,6 @@ static int nv_open(struct net_device *dev)
5167 int oom, i; 5022 int oom, i;
5168 u32 low; 5023 u32 low;
5169 5024
5170 dprintk(KERN_DEBUG "nv_open: begin\n");
5171
5172 /* power up phy */ 5025 /* power up phy */
5173 mii_rw(dev, np->phyaddr, MII_BMCR, 5026 mii_rw(dev, np->phyaddr, MII_BMCR,
5174 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); 5027 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
@@ -5204,7 +5057,7 @@ static int nv_open(struct net_device *dev)
5204 5057
5205 /* give hw rings */ 5058 /* give hw rings */
5206 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5059 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5207 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5060 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5208 base + NvRegRingSizes); 5061 base + NvRegRingSizes);
5209 5062
5210 writel(np->linkspeed, base + NvRegLinkSpeed); 5063 writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -5216,9 +5069,11 @@ static int nv_open(struct net_device *dev)
5216 writel(np->vlanctl_bits, base + NvRegVlanControl); 5069 writel(np->vlanctl_bits, base + NvRegVlanControl);
5217 pci_push(base); 5070 pci_push(base);
5218 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 5071 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5219 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 5072 if (reg_delay(dev, NvRegUnknownSetupReg5,
5220 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 5073 NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5221 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 5074 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5075 netdev_info(dev,
5076 "%s: SetupReg5, Bit 31 remained off\n", __func__);
5222 5077
5223 writel(0, base + NvRegMIIMask); 5078 writel(0, base + NvRegMIIMask);
5224 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5079 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
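The nv_open() change above moves the failure message out of reg_delay(): instead of handing the helper a KERN_INFO string, the caller now checks the return value and logs with netdev_info() and __func__. As a generic sketch of that division of labour (wait_for_bit is hypothetical, not the driver's reg_delay(), and the 10 us step is arbitrary):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll until (readl(reg) & mask) is non-zero; stay silent on timeout so
 * the caller chooses the log level and wording. */
static int wait_for_bit(void __iomem *reg, u32 mask, unsigned int max_us)
{
        unsigned int waited = 0;

        while (!(readl(reg) & mask)) {
                if (waited >= max_us)
                        return -ETIMEDOUT;
                udelay(10);
                waited += 10;
        }
        return 0;
}

Keeping the printing in the caller is what lets the message pick up the netdev prefix and __func__ for free.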
@@ -5251,8 +5106,7 @@ static int nv_open(struct net_device *dev)
5251 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 5106 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5252 else 5107 else
5253 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5108 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5254 } 5109 } else
5255 else
5256 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 5110 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5257 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5111 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5258 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 5112 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
@@ -5263,7 +5117,7 @@ static int nv_open(struct net_device *dev)
5263 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 5117 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
5264 5118
5265 i = readl(base + NvRegPowerState); 5119 i = readl(base + NvRegPowerState);
5266 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 5120 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5267 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 5121 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5268 5122
5269 pci_push(base); 5123 pci_push(base);
@@ -5276,9 +5130,8 @@ static int nv_open(struct net_device *dev)
5276 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5130 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5277 pci_push(base); 5131 pci_push(base);
5278 5132
5279 if (nv_request_irq(dev, 0)) { 5133 if (nv_request_irq(dev, 0))
5280 goto out_drain; 5134 goto out_drain;
5281 }
5282 5135
5283 /* ask for interrupts */ 5136 /* ask for interrupts */
5284 nv_enable_hw_interrupts(dev, np->irqmask); 5137 nv_enable_hw_interrupts(dev, np->irqmask);
@@ -5296,7 +5149,6 @@ static int nv_open(struct net_device *dev)
5296 u32 miistat; 5149 u32 miistat;
5297 miistat = readl(base + NvRegMIIStatus); 5150 miistat = readl(base + NvRegMIIStatus);
5298 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5151 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5299 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
5300 } 5152 }
5301 /* set linkspeed to invalid value, thus force nv_update_linkspeed 5153 /* set linkspeed to invalid value, thus force nv_update_linkspeed
5302 * to init hw */ 5154 * to init hw */
@@ -5309,7 +5161,7 @@ static int nv_open(struct net_device *dev)
5309 if (ret) { 5161 if (ret) {
5310 netif_carrier_on(dev); 5162 netif_carrier_on(dev);
5311 } else { 5163 } else {
5312 printk(KERN_INFO "%s: no link during initialization.\n", dev->name); 5164 netdev_info(dev, "no link during initialization\n");
5313 netif_carrier_off(dev); 5165 netif_carrier_off(dev);
5314 } 5166 }
5315 if (oom) 5167 if (oom)
@@ -5352,7 +5204,6 @@ static int nv_close(struct net_device *dev)
5352 base = get_hwbase(dev); 5204 base = get_hwbase(dev);
5353 nv_disable_hw_interrupts(dev, np->irqmask); 5205 nv_disable_hw_interrupts(dev, np->irqmask);
5354 pci_push(base); 5206 pci_push(base);
5355 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
5356 5207
5357 spin_unlock_irq(&np->lock); 5208 spin_unlock_irq(&np->lock);
5358 5209
@@ -5421,8 +5272,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5421 static int printed_version; 5272 static int printed_version;
5422 5273
5423 if (!printed_version++) 5274 if (!printed_version++)
5424 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet" 5275 pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5425 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION); 5276 FORCEDETH_VERSION);
5426 5277
5427 dev = alloc_etherdev(sizeof(struct fe_priv)); 5278 dev = alloc_etherdev(sizeof(struct fe_priv));
5428 err = -ENOMEM; 5279 err = -ENOMEM;
@@ -5465,10 +5316,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5465 err = -EINVAL; 5316 err = -EINVAL;
5466 addr = 0; 5317 addr = 0;
5467 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5318 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5468 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
5469 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
5470 pci_resource_len(pci_dev, i),
5471 pci_resource_flags(pci_dev, i));
5472 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 5319 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5473 pci_resource_len(pci_dev, i) >= np->register_size) { 5320 pci_resource_len(pci_dev, i) >= np->register_size) {
5474 addr = pci_resource_start(pci_dev, i); 5321 addr = pci_resource_start(pci_dev, i);
@@ -5476,8 +5323,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5476 } 5323 }
5477 } 5324 }
5478 if (i == DEVICE_COUNT_RESOURCE) { 5325 if (i == DEVICE_COUNT_RESOURCE) {
5479 dev_printk(KERN_INFO, &pci_dev->dev, 5326 dev_info(&pci_dev->dev, "Couldn't find register window\n");
5480 "Couldn't find register window\n");
5481 goto out_relreg; 5327 goto out_relreg;
5482 } 5328 }
5483 5329
@@ -5493,13 +5339,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5493 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 5339 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5494 if (dma_64bit) { 5340 if (dma_64bit) {
5495 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39))) 5341 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
5496 dev_printk(KERN_INFO, &pci_dev->dev, 5342 dev_info(&pci_dev->dev,
5497 "64-bit DMA failed, using 32-bit addressing\n"); 5343 "64-bit DMA failed, using 32-bit addressing\n");
5498 else 5344 else
5499 dev->features |= NETIF_F_HIGHDMA; 5345 dev->features |= NETIF_F_HIGHDMA;
5500 if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) { 5346 if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
5501 dev_printk(KERN_INFO, &pci_dev->dev, 5347 dev_info(&pci_dev->dev,
5502 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); 5348 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5503 } 5349 }
5504 } 5350 }
5505 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 5351 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
@@ -5620,7 +5466,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5620 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 5466 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5621 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 5467 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5622 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5468 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5623 printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n"); 5469 dev_dbg(&pci_dev->dev,
5470 "%s: set workaround bit for reversed mac addr\n",
5471 __func__);
5624 } 5472 }
5625 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 5473 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5626 5474
@@ -5629,17 +5477,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5629 * Bad mac address. At least one bios sets the mac address 5477 * Bad mac address. At least one bios sets the mac address
5630 * to 01:23:45:67:89:ab 5478 * to 01:23:45:67:89:ab
5631 */ 5479 */
5632 dev_printk(KERN_ERR, &pci_dev->dev, 5480 dev_err(&pci_dev->dev,
5633 "Invalid Mac address detected: %pM\n", 5481 "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5634 dev->dev_addr); 5482 dev->dev_addr);
5635 dev_printk(KERN_ERR, &pci_dev->dev,
5636 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5637 random_ether_addr(dev->dev_addr); 5483 random_ether_addr(dev->dev_addr);
5484 dev_err(&pci_dev->dev,
5485 "Using random MAC address: %pM\n", dev->dev_addr);
5638 } 5486 }
5639 5487
5640 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
5641 pci_name(pci_dev), dev->dev_addr);
5642
5643 /* set mac address */ 5488 /* set mac address */
5644 nv_copy_mac_to_hw(dev); 5489 nv_copy_mac_to_hw(dev);
5645 5490
@@ -5663,16 +5508,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5663 writel(powerstate, base + NvRegPowerState2); 5508 writel(powerstate, base + NvRegPowerState2);
5664 } 5509 }
5665 5510
5666 if (np->desc_ver == DESC_VER_1) { 5511 if (np->desc_ver == DESC_VER_1)
5667 np->tx_flags = NV_TX_VALID; 5512 np->tx_flags = NV_TX_VALID;
5668 } else { 5513 else
5669 np->tx_flags = NV_TX2_VALID; 5514 np->tx_flags = NV_TX2_VALID;
5670 }
5671 5515
5672 np->msi_flags = 0; 5516 np->msi_flags = 0;
5673 if ((id->driver_data & DEV_HAS_MSI) && msi) { 5517 if ((id->driver_data & DEV_HAS_MSI) && msi)
5674 np->msi_flags |= NV_MSI_CAPABLE; 5518 np->msi_flags |= NV_MSI_CAPABLE;
5675 } 5519
5676 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { 5520 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5677 /* msix has had reported issues when modifying irqmask 5521 /* msix has had reported issues when modifying irqmask
5678 as in the case of napi, therefore, disable for now 5522 as in the case of napi, therefore, disable for now
@@ -5702,11 +5546,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5702 if (id->driver_data & DEV_NEED_TIMERIRQ) 5546 if (id->driver_data & DEV_NEED_TIMERIRQ)
5703 np->irqmask |= NVREG_IRQ_TIMER; 5547 np->irqmask |= NVREG_IRQ_TIMER;
5704 if (id->driver_data & DEV_NEED_LINKTIMER) { 5548 if (id->driver_data & DEV_NEED_LINKTIMER) {
5705 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5706 np->need_linktimer = 1; 5549 np->need_linktimer = 1;
5707 np->link_timeout = jiffies + LINK_TIMEOUT; 5550 np->link_timeout = jiffies + LINK_TIMEOUT;
5708 } else { 5551 } else {
5709 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5710 np->need_linktimer = 0; 5552 np->need_linktimer = 0;
5711 } 5553 }
5712 5554
@@ -5735,19 +5577,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5735 nv_mgmt_acquire_sema(dev) && 5577 nv_mgmt_acquire_sema(dev) &&
5736 nv_mgmt_get_version(dev)) { 5578 nv_mgmt_get_version(dev)) {
5737 np->mac_in_use = 1; 5579 np->mac_in_use = 1;
5738 if (np->mgmt_version > 0) { 5580 if (np->mgmt_version > 0)
5739 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE; 5581 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5740 }
5741 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
5742 pci_name(pci_dev), np->mac_in_use);
5743 /* management unit setup the phy already? */ 5582 /* management unit setup the phy already? */
5744 if (np->mac_in_use && 5583 if (np->mac_in_use &&
5745 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == 5584 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5746 NVREG_XMITCTL_SYNC_PHY_INIT)) { 5585 NVREG_XMITCTL_SYNC_PHY_INIT)) {
5747 /* phy is inited by mgmt unit */ 5586 /* phy is inited by mgmt unit */
5748 phyinitialized = 1; 5587 phyinitialized = 1;
5749 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
5750 pci_name(pci_dev));
5751 } else { 5588 } else {
5752 /* we need to init the phy */ 5589 /* we need to init the phy */
5753 } 5590 }
@@ -5773,8 +5610,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5773 np->phy_model = id2 & PHYID2_MODEL_MASK; 5610 np->phy_model = id2 & PHYID2_MODEL_MASK;
5774 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 5611 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5775 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 5612 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5776 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
5777 pci_name(pci_dev), id1, id2, phyaddr);
5778 np->phyaddr = phyaddr; 5613 np->phyaddr = phyaddr;
5779 np->phy_oui = id1 | id2; 5614 np->phy_oui = id1 | id2;
5780 5615
@@ -5788,8 +5623,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5788 break; 5623 break;
5789 } 5624 }
5790 if (i == 33) { 5625 if (i == 33) {
5791 dev_printk(KERN_INFO, &pci_dev->dev, 5626 dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
5792 "open: Could not find a valid PHY.\n");
5793 goto out_error; 5627 goto out_error;
5794 } 5628 }
5795 5629
@@ -5799,9 +5633,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5799 } else { 5633 } else {
5800 /* see if it is a gigabit phy */ 5634 /* see if it is a gigabit phy */
5801 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 5635 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5802 if (mii_status & PHY_GIGABIT) { 5636 if (mii_status & PHY_GIGABIT)
5803 np->gigabit = PHY_GIGABIT; 5637 np->gigabit = PHY_GIGABIT;
5804 }
5805 } 5638 }
5806 5639
5807 /* set default link speed settings */ 5640 /* set default link speed settings */
@@ -5811,37 +5644,27 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5811 5644
5812 err = register_netdev(dev); 5645 err = register_netdev(dev);
5813 if (err) { 5646 if (err) {
5814 dev_printk(KERN_INFO, &pci_dev->dev, 5647 dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
5815 "unable to register netdev: %d\n", err);
5816 goto out_error; 5648 goto out_error;
5817 } 5649 }
5818 5650
5819 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, " 5651 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5820 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", 5652 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5821 dev->name, 5653
5822 np->phy_oui, 5654 dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5823 np->phyaddr, 5655 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5824 dev->dev_addr[0], 5656 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5825 dev->dev_addr[1], 5657 "csum " : "",
5826 dev->dev_addr[2], 5658 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5827 dev->dev_addr[3], 5659 "vlan " : "",
5828 dev->dev_addr[4], 5660 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5829 dev->dev_addr[5]); 5661 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5830 5662 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5831 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", 5663 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5832 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 5664 np->need_linktimer ? "lnktim " : "",
5833 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? 5665 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5834 "csum " : "", 5666 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5835 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? 5667 np->desc_ver);
5836 "vlan " : "",
5837 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5838 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5839 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5840 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5841 np->need_linktimer ? "lnktim " : "",
5842 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5843 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5844 np->desc_ver);
5845 5668
5846 return 0; 5669 return 0;
5847 5670
@@ -5931,13 +5754,13 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5931 int i; 5754 int i;
5932 5755
5933 if (netif_running(dev)) { 5756 if (netif_running(dev)) {
5934 // Gross. 5757 /* Gross. */
5935 nv_close(dev); 5758 nv_close(dev);
5936 } 5759 }
5937 netif_device_detach(dev); 5760 netif_device_detach(dev);
5938 5761
5939 /* save non-pci configuration space */ 5762 /* save non-pci configuration space */
5940 for (i = 0;i <= np->register_size/sizeof(u32); i++) 5763 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5941 np->saved_config_space[i] = readl(base + i*sizeof(u32)); 5764 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5942 5765
5943 pci_save_state(pdev); 5766 pci_save_state(pdev);
@@ -5960,7 +5783,7 @@ static int nv_resume(struct pci_dev *pdev)
5960 pci_enable_wake(pdev, PCI_D0, 0); 5783 pci_enable_wake(pdev, PCI_D0, 0);
5961 5784
5962 /* restore non-pci configuration space */ 5785 /* restore non-pci configuration space */
5963 for (i = 0;i <= np->register_size/sizeof(u32); i++) 5786 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5964 writel(np->saved_config_space[i], base+i*sizeof(u32)); 5787 writel(np->saved_config_space[i], base+i*sizeof(u32));
5965 5788
5966 if (np->driver_data & DEV_NEED_MSI_FIX) 5789 if (np->driver_data & DEV_NEED_MSI_FIX)
@@ -5990,9 +5813,8 @@ static void nv_shutdown(struct pci_dev *pdev)
5990 * If we really go for poweroff, we must not restore the MAC, 5813 * If we really go for poweroff, we must not restore the MAC,
5991 * otherwise the MAC for WOL will be reversed at least on some boards. 5814 * otherwise the MAC for WOL will be reversed at least on some boards.
5992 */ 5815 */
5993 if (system_state != SYSTEM_POWER_OFF) { 5816 if (system_state != SYSTEM_POWER_OFF)
5994 nv_restore_mac_addr(pdev); 5817 nv_restore_mac_addr(pdev);
5995 }
5996 5818
5997 pci_disable_device(pdev); 5819 pci_disable_device(pdev);
5998 /* 5820 /*
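The forcedeth changes above are mostly a logging cleanup: dprintk() debug messages are deleted outright, and printk()/dev_printk() calls are replaced by the pr_info(), netdev_info(), dev_info() and dev_dbg() helpers, with open-coded MAC formatting collapsed into %pM. A minimal kernel-style sketch of the replacement helpers, assuming a struct net_device *dev and its struct pci_dev *pci_dev (the function name here is illustrative, not part of the patch):

	#include <linux/netdevice.h>
	#include <linux/pci.h>

	static void example_log_status(struct net_device *dev,
				       struct pci_dev *pci_dev)
	{
		/* driver-global message, no device context needed */
		pr_info("driver loaded\n");

		/* prefixed with the interface name (e.g. "eth0: ...") */
		netdev_info(dev, "no link during initialization\n");

		/* prefixed with the PCI device name; %pM prints a MAC address */
		dev_info(&pci_dev->dev, "MAC address %pM\n", dev->dev_addr);

		/* compiled out unless DEBUG or dynamic debug is enabled */
		dev_dbg(&pci_dev->dev, "%s: probe details\n", __func__);
	}

Compared with the removed dprintk() wrappers, these helpers carry the device or interface name automatically and keep the message format consistent across drivers.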
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 892d196f17a..67ea262e482 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -2436,10 +2436,9 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2436 int size; 2436 int size;
2437 2437
2438 size = sizeof(struct igb_buffer) * tx_ring->count; 2438 size = sizeof(struct igb_buffer) * tx_ring->count;
2439 tx_ring->buffer_info = vmalloc(size); 2439 tx_ring->buffer_info = vzalloc(size);
2440 if (!tx_ring->buffer_info) 2440 if (!tx_ring->buffer_info)
2441 goto err; 2441 goto err;
2442 memset(tx_ring->buffer_info, 0, size);
2443 2442
2444 /* round up to nearest 4K */ 2443 /* round up to nearest 4K */
2445 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 2444 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -2587,10 +2586,9 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2587 int size, desc_len; 2586 int size, desc_len;
2588 2587
2589 size = sizeof(struct igb_buffer) * rx_ring->count; 2588 size = sizeof(struct igb_buffer) * rx_ring->count;
2590 rx_ring->buffer_info = vmalloc(size); 2589 rx_ring->buffer_info = vzalloc(size);
2591 if (!rx_ring->buffer_info) 2590 if (!rx_ring->buffer_info)
2592 goto err; 2591 goto err;
2593 memset(rx_ring->buffer_info, 0, size);
2594 2592
2595 desc_len = sizeof(union e1000_adv_rx_desc); 2593 desc_len = sizeof(union e1000_adv_rx_desc);
2596 2594
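The igb, igbvf and ixgb hunks in this series all apply the same conversion: a vmalloc() followed by memset(..., 0, size) becomes a single vzalloc(), which returns virtually contiguous, already-zeroed memory. A minimal sketch of the pattern, using an illustrative buffer type rather than the drivers' real ring structures:

	#include <linux/vmalloc.h>

	struct example_buffer {
		void *data;
	};

	static struct example_buffer *alloc_buffer_info(unsigned int count)
	{
		size_t size = sizeof(struct example_buffer) * count;

		/*
		 * Previously:
		 *	info = vmalloc(size);
		 *	if (!info)
		 *		return NULL;
		 *	memset(info, 0, size);
		 */
		return vzalloc(size);	/* NULL on failure, zeroed on success */
	}

Callers keep the same NULL check; only the explicit memset() disappears.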
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile
index c2f150d8f2d..0fa3db3dd8b 100644
--- a/drivers/net/igbvf/Makefile
+++ b/drivers/net/igbvf/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel(R) 82576 Virtual Function Linux driver 3# Intel(R) 82576 Virtual Function Linux driver
4# Copyright(c) 2009 Intel Corporation. 4# Copyright(c) 2009 - 2010 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h
index 88a47537518..79f2604673f 100644
--- a/drivers/net/igbvf/defines.h
+++ b/drivers/net/igbvf/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 33add708bcb..abb3606928f 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index debeee2dc71..9d4d63e536d 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -126,7 +126,6 @@ struct igbvf_buffer {
126 unsigned int page_offset; 126 unsigned int page_offset;
127 }; 127 };
128 }; 128 };
129 struct page *page;
130}; 129};
131 130
132union igbvf_desc { 131union igbvf_desc {
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c
index 819a8ec901d..3d6f4cc3998 100644
--- a/drivers/net/igbvf/mbx.c
+++ b/drivers/net/igbvf/mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h
index 4938609dbfb..c2883c45d47 100644
--- a/drivers/net/igbvf/mbx.h
+++ b/drivers/net/igbvf/mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 28af019c97b..8dbde2397c1 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -44,12 +44,13 @@
44 44
45#include "igbvf.h" 45#include "igbvf.h"
46 46
47#define DRV_VERSION "1.0.0-k0" 47#define DRV_VERSION "1.0.8-k0"
48char igbvf_driver_name[] = "igbvf"; 48char igbvf_driver_name[] = "igbvf";
49const char igbvf_driver_version[] = DRV_VERSION; 49const char igbvf_driver_version[] = DRV_VERSION;
50static const char igbvf_driver_string[] = 50static const char igbvf_driver_string[] =
51 "Intel(R) Virtual Function Network Driver"; 51 "Intel(R) Virtual Function Network Driver";
52static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 52static const char igbvf_copyright[] =
53 "Copyright (c) 2009 - 2010 Intel Corporation.";
53 54
54static int igbvf_poll(struct napi_struct *napi, int budget); 55static int igbvf_poll(struct napi_struct *napi, int budget);
55static void igbvf_reset(struct igbvf_adapter *); 56static void igbvf_reset(struct igbvf_adapter *);
@@ -429,10 +430,9 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
429 int size; 430 int size;
430 431
431 size = sizeof(struct igbvf_buffer) * tx_ring->count; 432 size = sizeof(struct igbvf_buffer) * tx_ring->count;
432 tx_ring->buffer_info = vmalloc(size); 433 tx_ring->buffer_info = vzalloc(size);
433 if (!tx_ring->buffer_info) 434 if (!tx_ring->buffer_info)
434 goto err; 435 goto err;
435 memset(tx_ring->buffer_info, 0, size);
436 436
437 /* round up to nearest 4K */ 437 /* round up to nearest 4K */
438 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 438 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -469,10 +469,9 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
469 int size, desc_len; 469 int size, desc_len;
470 470
471 size = sizeof(struct igbvf_buffer) * rx_ring->count; 471 size = sizeof(struct igbvf_buffer) * rx_ring->count;
472 rx_ring->buffer_info = vmalloc(size); 472 rx_ring->buffer_info = vzalloc(size);
473 if (!rx_ring->buffer_info) 473 if (!rx_ring->buffer_info)
474 goto err; 474 goto err;
475 memset(rx_ring->buffer_info, 0, size);
476 475
477 desc_len = sizeof(union e1000_adv_rx_desc); 476 desc_len = sizeof(union e1000_adv_rx_desc);
478 477
@@ -1851,8 +1850,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
1851 1850
1852 if (link) { 1851 if (link) {
1853 if (!netif_carrier_ok(netdev)) { 1852 if (!netif_carrier_ok(netdev)) {
1854 bool txb2b = 1;
1855
1856 mac->ops.get_link_up_info(&adapter->hw, 1853 mac->ops.get_link_up_info(&adapter->hw,
1857 &adapter->link_speed, 1854 &adapter->link_speed,
1858 &adapter->link_duplex); 1855 &adapter->link_duplex);
@@ -1862,11 +1859,9 @@ static void igbvf_watchdog_task(struct work_struct *work)
1862 adapter->tx_timeout_factor = 1; 1859 adapter->tx_timeout_factor = 1;
1863 switch (adapter->link_speed) { 1860 switch (adapter->link_speed) {
1864 case SPEED_10: 1861 case SPEED_10:
1865 txb2b = 0;
1866 adapter->tx_timeout_factor = 16; 1862 adapter->tx_timeout_factor = 16;
1867 break; 1863 break;
1868 case SPEED_100: 1864 case SPEED_100:
1869 txb2b = 0;
1870 /* maybe add some timeout factor ? */ 1865 /* maybe add some timeout factor ? */
1871 break; 1866 break;
1872 } 1867 }
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h
index b9e24ed70d0..77e18d3d6b1 100644
--- a/drivers/net/igbvf/regs.h
+++ b/drivers/net/igbvf/regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index a9a61efa964..0cc13c6ed41 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
index 1e8ce3741a6..c36ea21f17f 100644
--- a/drivers/net/igbvf/vf.h
+++ b/drivers/net/igbvf/vf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 8df645e78f2..38e15be6d51 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1605,7 +1605,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1605 } 1605 }
1606 veth_dev[i] = dev; 1606 veth_dev[i] = dev;
1607 1607
1608 port = (struct veth_port*)netdev_priv(dev); 1608 port = netdev_priv(dev);
1609 1609
1610 /* Start the state machine on each connection on this vlan. If we're 1610 /* Start the state machine on each connection on this vlan. If we're
1611 * the first dev to do so this will commence link negotiation */ 1611 * the first dev to do so this will commence link negotiation */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index caa8192fff2..211a1694667 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -669,13 +669,12 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
669 int size; 669 int size;
670 670
671 size = sizeof(struct ixgb_buffer) * txdr->count; 671 size = sizeof(struct ixgb_buffer) * txdr->count;
672 txdr->buffer_info = vmalloc(size); 672 txdr->buffer_info = vzalloc(size);
673 if (!txdr->buffer_info) { 673 if (!txdr->buffer_info) {
674 netif_err(adapter, probe, adapter->netdev, 674 netif_err(adapter, probe, adapter->netdev,
675 "Unable to allocate transmit descriptor ring memory\n"); 675 "Unable to allocate transmit descriptor ring memory\n");
676 return -ENOMEM; 676 return -ENOMEM;
677 } 677 }
678 memset(txdr->buffer_info, 0, size);
679 678
680 /* round up to nearest 4K */ 679 /* round up to nearest 4K */
681 680
@@ -759,13 +758,12 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
759 int size; 758 int size;
760 759
761 size = sizeof(struct ixgb_buffer) * rxdr->count; 760 size = sizeof(struct ixgb_buffer) * rxdr->count;
762 rxdr->buffer_info = vmalloc(size); 761 rxdr->buffer_info = vzalloc(size);
763 if (!rxdr->buffer_info) { 762 if (!rxdr->buffer_info) {
764 netif_err(adapter, probe, adapter->netdev, 763 netif_err(adapter, probe, adapter->netdev,
765 "Unable to allocate receive descriptor ring\n"); 764 "Unable to allocate receive descriptor ring\n");
766 return -ENOMEM; 765 return -ENOMEM;
767 } 766 }
768 memset(rxdr->buffer_info, 0, size);
769 767
770 /* Round up to nearest 4K */ 768 /* Round up to nearest 4K */
771 769
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 8f81efb4916..7d7387fbdec 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o 37 ixgbe_mbx.o ixgbe_x540.o
38 38
39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ed8703cfffb..3ae30b8cb7d 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -61,10 +61,8 @@
61#define IXGBE_MIN_RXD 64 61#define IXGBE_MIN_RXD 64
62 62
63/* flow control */ 63/* flow control */
64#define IXGBE_DEFAULT_FCRTL 0x10000
65#define IXGBE_MIN_FCRTL 0x40 64#define IXGBE_MIN_FCRTL 0x40
66#define IXGBE_MAX_FCRTL 0x7FF80 65#define IXGBE_MAX_FCRTL 0x7FF80
67#define IXGBE_DEFAULT_FCRTH 0x20000
68#define IXGBE_MIN_FCRTH 0x600 66#define IXGBE_MIN_FCRTH 0x600
69#define IXGBE_MAX_FCRTH 0x7FFF0 67#define IXGBE_MAX_FCRTH 0x7FFF0
70#define IXGBE_DEFAULT_FCPAUSE 0xFFFF 68#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
@@ -130,7 +128,9 @@ struct ixgbe_tx_buffer {
130 unsigned long time_stamp; 128 unsigned long time_stamp;
131 u16 length; 129 u16 length;
132 u16 next_to_watch; 130 u16 next_to_watch;
133 u16 mapped_as_page; 131 unsigned int bytecount;
132 u16 gso_segs;
133 u8 mapped_as_page;
134}; 134};
135 135
136struct ixgbe_rx_buffer { 136struct ixgbe_rx_buffer {
@@ -146,12 +146,56 @@ struct ixgbe_queue_stats {
146 u64 bytes; 146 u64 bytes;
147}; 147};
148 148
149struct ixgbe_tx_queue_stats {
150 u64 restart_queue;
151 u64 tx_busy;
152 u64 completed;
153 u64 tx_done_old;
154};
155
156struct ixgbe_rx_queue_stats {
157 u64 rsc_count;
158 u64 rsc_flush;
159 u64 non_eop_descs;
160 u64 alloc_rx_page_failed;
161 u64 alloc_rx_buff_failed;
162};
163
164enum ixbge_ring_state_t {
165 __IXGBE_TX_FDIR_INIT_DONE,
166 __IXGBE_TX_DETECT_HANG,
167 __IXGBE_HANG_CHECK_ARMED,
168 __IXGBE_RX_PS_ENABLED,
169 __IXGBE_RX_RSC_ENABLED,
170};
171
172#define ring_is_ps_enabled(ring) \
173 test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
174#define set_ring_ps_enabled(ring) \
175 set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
176#define clear_ring_ps_enabled(ring) \
177 clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
178#define check_for_tx_hang(ring) \
179 test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
180#define set_check_for_tx_hang(ring) \
181 set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
182#define clear_check_for_tx_hang(ring) \
183 clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
184#define ring_is_rsc_enabled(ring) \
185 test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
186#define set_ring_rsc_enabled(ring) \
187 set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
188#define clear_ring_rsc_enabled(ring) \
189 clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
149struct ixgbe_ring { 190struct ixgbe_ring {
150 void *desc; /* descriptor ring memory */ 191 void *desc; /* descriptor ring memory */
192 struct device *dev; /* device for DMA mapping */
193 struct net_device *netdev; /* netdev ring belongs to */
151 union { 194 union {
152 struct ixgbe_tx_buffer *tx_buffer_info; 195 struct ixgbe_tx_buffer *tx_buffer_info;
153 struct ixgbe_rx_buffer *rx_buffer_info; 196 struct ixgbe_rx_buffer *rx_buffer_info;
154 }; 197 };
198 unsigned long state;
155 u8 atr_sample_rate; 199 u8 atr_sample_rate;
156 u8 atr_count; 200 u8 atr_count;
157 u16 count; /* amount of descriptors */ 201 u16 count; /* amount of descriptors */
@@ -160,38 +204,30 @@ struct ixgbe_ring {
160 u16 next_to_clean; 204 u16 next_to_clean;
161 205
162 u8 queue_index; /* needed for multiqueue queue management */ 206 u8 queue_index; /* needed for multiqueue queue management */
163 207 u8 reg_idx; /* holds the special value that gets
164#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
165 u8 flags; /* per ring feature flags */
166 u16 head;
167 u16 tail;
168
169 unsigned int total_bytes;
170 unsigned int total_packets;
171
172#ifdef CONFIG_IXGBE_DCA
173 /* cpu for tx queue */
174 int cpu;
175#endif
176
177 u16 work_limit; /* max work per interrupt */
178 u16 reg_idx; /* holds the special value that gets
179 * the hardware register offset 208 * the hardware register offset
180 * associated with this ring, which is 209 * associated with this ring, which is
181 * different for DCB and RSS modes 210 * different for DCB and RSS modes
182 */ 211 */
183 212
213 u16 work_limit; /* max work per interrupt */
214
215 u8 __iomem *tail;
216
217 unsigned int total_bytes;
218 unsigned int total_packets;
219
184 struct ixgbe_queue_stats stats; 220 struct ixgbe_queue_stats stats;
185 struct u64_stats_sync syncp; 221 struct u64_stats_sync syncp;
222 union {
223 struct ixgbe_tx_queue_stats tx_stats;
224 struct ixgbe_rx_queue_stats rx_stats;
225 };
186 int numa_node; 226 int numa_node;
187 unsigned long reinit_state;
188 u64 rsc_count; /* stat for coalesced packets */
189 u64 rsc_flush; /* stats for flushed packets */
190 u32 restart_queue; /* track tx queue restarts */
191 u32 non_eop_descs; /* track hardware descriptor chaining */
192
193 unsigned int size; /* length in bytes */ 227 unsigned int size; /* length in bytes */
194 dma_addr_t dma; /* phys. address of descriptor ring */ 228 dma_addr_t dma; /* phys. address of descriptor ring */
229 struct rcu_head rcu;
230 struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
195} ____cacheline_internodealigned_in_smp; 231} ____cacheline_internodealigned_in_smp;
196 232
197enum ixgbe_ring_f_enum { 233enum ixgbe_ring_f_enum {
@@ -237,6 +273,9 @@ struct ixgbe_q_vector {
237 unsigned int v_idx; /* index of q_vector within array, also used for 273 unsigned int v_idx; /* index of q_vector within array, also used for
238 * finding the bit in EICR and friends that 274 * finding the bit in EICR and friends that
239 * represents the vector for this ring */ 275 * represents the vector for this ring */
276#ifdef CONFIG_IXGBE_DCA
277 int cpu; /* CPU for DCA */
278#endif
240 struct napi_struct napi; 279 struct napi_struct napi;
241 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ 280 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
242 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ 281 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -246,6 +285,7 @@ struct ixgbe_q_vector {
246 u8 rx_itr; 285 u8 rx_itr;
247 u32 eitr; 286 u32 eitr;
248 cpumask_var_t affinity_mask; 287 cpumask_var_t affinity_mask;
288 char name[IFNAMSIZ + 9];
249}; 289};
250 290
251/* Helper macros to switch between ints/sec and what the register uses. 291/* Helper macros to switch between ints/sec and what the register uses.
@@ -294,7 +334,6 @@ struct ixgbe_adapter {
294 u16 bd_number; 334 u16 bd_number;
295 struct work_struct reset_task; 335 struct work_struct reset_task;
296 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 336 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
297 char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
298 struct ixgbe_dcb_config dcb_cfg; 337 struct ixgbe_dcb_config dcb_cfg;
299 struct ixgbe_dcb_config temp_dcb_cfg; 338 struct ixgbe_dcb_config temp_dcb_cfg;
300 u8 dcb_set_bitmap; 339 u8 dcb_set_bitmap;
@@ -417,6 +456,7 @@ struct ixgbe_adapter {
417 int node; 456 int node;
418 struct work_struct check_overtemp_task; 457 struct work_struct check_overtemp_task;
419 u32 interrupt_event; 458 u32 interrupt_event;
459 char lsc_int_name[IFNAMSIZ + 9];
420 460
421 /* SR-IOV */ 461 /* SR-IOV */
422 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); 462 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -428,17 +468,25 @@ enum ixbge_state_t {
428 __IXGBE_TESTING, 468 __IXGBE_TESTING,
429 __IXGBE_RESETTING, 469 __IXGBE_RESETTING,
430 __IXGBE_DOWN, 470 __IXGBE_DOWN,
431 __IXGBE_FDIR_INIT_DONE,
432 __IXGBE_SFP_MODULE_NOT_FOUND 471 __IXGBE_SFP_MODULE_NOT_FOUND
433}; 472};
434 473
474struct ixgbe_rsc_cb {
475 dma_addr_t dma;
476 u16 skb_cnt;
477 bool delay_unmap;
478};
479#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
480
435enum ixgbe_boards { 481enum ixgbe_boards {
436 board_82598, 482 board_82598,
437 board_82599, 483 board_82599,
484 board_X540,
438}; 485};
439 486
440extern struct ixgbe_info ixgbe_82598_info; 487extern struct ixgbe_info ixgbe_82598_info;
441extern struct ixgbe_info ixgbe_82599_info; 488extern struct ixgbe_info ixgbe_82599_info;
489extern struct ixgbe_info ixgbe_X540_info;
442#ifdef CONFIG_IXGBE_DCB 490#ifdef CONFIG_IXGBE_DCB
443extern const struct dcbnl_rtnl_ops dcbnl_ops; 491extern const struct dcbnl_rtnl_ops dcbnl_ops;
444extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, 492extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
@@ -454,26 +502,24 @@ extern void ixgbe_down(struct ixgbe_adapter *adapter);
454extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); 502extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
455extern void ixgbe_reset(struct ixgbe_adapter *adapter); 503extern void ixgbe_reset(struct ixgbe_adapter *adapter);
456extern void ixgbe_set_ethtool_ops(struct net_device *netdev); 504extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
457extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 505extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
458extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 506extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
459extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 507extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
460extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 508extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
461extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 509extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
462extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 510extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
463extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 511extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
464extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 512extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
465extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 513extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
466extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, 514extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
467 struct net_device *,
468 struct ixgbe_adapter *, 515 struct ixgbe_adapter *,
469 struct ixgbe_ring *); 516 struct ixgbe_ring *);
470extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *, 517extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
471 struct ixgbe_tx_buffer *); 518 struct ixgbe_tx_buffer *);
472extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, 519extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
473 struct ixgbe_ring *rx_ring,
474 int cleaned_count);
475extern void ixgbe_write_eitr(struct ixgbe_q_vector *); 520extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
476extern int ethtool_ioctl(struct ifreq *ifr); 521extern int ethtool_ioctl(struct ifreq *ifr);
522extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
477extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); 523extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
478extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); 524extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
479extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); 525extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
@@ -498,6 +544,10 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
498 u16 flex_byte); 544 u16 flex_byte);
499extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, 545extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
500 u8 l4type); 546 u8 l4type);
547extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
548 struct ixgbe_ring *ring);
549extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
550 struct ixgbe_ring *ring);
501extern void ixgbe_set_rx_mode(struct net_device *netdev); 551extern void ixgbe_set_rx_mode(struct net_device *netdev);
502#ifdef IXGBE_FCOE 552#ifdef IXGBE_FCOE
503extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 553extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
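The ixgbe.h changes replace the old per-ring flags byte (IXGBE_RING_RX_PS_ENABLED) and the adapter-wide __IXGBE_FDIR_INIT_DONE bit with a per-ring state word plus test_bit/set_bit/clear_bit wrappers. A hedged fragment showing how such wrappers are typically used from ring-setup code; rx_ring and adapter are assumed to come from the driver's existing structures, and rsc_wanted stands in for whatever policy decides the setting:

	/* record the per-ring decision with atomic bitops on ring->state */
	if (rsc_wanted)
		set_ring_rsc_enabled(rx_ring);		/* set_bit() */
	else
		clear_ring_rsc_enabled(rx_ring);	/* clear_bit() */

	/* fast-path queries are a plain test_bit() */
	if (ring_is_rsc_enabled(rx_ring))
		ixgbe_configure_rscctl(adapter, rx_ring);
	else
		ixgbe_clear_rscctl(adapter, rx_ring);

Keeping the state per ring lets each queue be toggled independently, and the bitop-based helpers need no extra locking.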
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 9c02d6014cc..d0f1d9d2c41 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -38,9 +38,6 @@
38#define IXGBE_82598_MC_TBL_SIZE 128 38#define IXGBE_82598_MC_TBL_SIZE 128
39#define IXGBE_82598_VFT_TBL_SIZE 128 39#define IXGBE_82598_VFT_TBL_SIZE 128
40 40
41static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
42 ixgbe_link_speed *speed,
43 bool *autoneg);
44static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 41static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
45 ixgbe_link_speed speed, 42 ixgbe_link_speed speed,
46 bool autoneg, 43 bool autoneg,
@@ -156,7 +153,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
156 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 153 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
157 mac->ops.setup_link = &ixgbe_setup_copper_link_82598; 154 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
158 mac->ops.get_link_capabilities = 155 mac->ops.get_link_capabilities =
159 &ixgbe_get_copper_link_capabilities_82598; 156 &ixgbe_get_copper_link_capabilities_generic;
160 } 157 }
161 158
162 switch (hw->phy.type) { 159 switch (hw->phy.type) {
@@ -274,37 +271,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
274} 271}
275 272
276/** 273/**
277 * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
278 * @hw: pointer to hardware structure
279 * @speed: pointer to link speed
280 * @autoneg: boolean auto-negotiation value
281 *
282 * Determines the link capabilities by reading the AUTOC register.
283 **/
284static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
285 ixgbe_link_speed *speed,
286 bool *autoneg)
287{
288 s32 status = IXGBE_ERR_LINK_SETUP;
289 u16 speed_ability;
290
291 *speed = 0;
292 *autoneg = true;
293
294 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
295 &speed_ability);
296
297 if (status == 0) {
298 if (speed_ability & MDIO_SPEED_10G)
299 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
300 if (speed_ability & MDIO_PMA_SPEED_1000)
301 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
302 }
303
304 return status;
305}
306
307/**
308 * ixgbe_get_media_type_82598 - Determines media type 274 * ixgbe_get_media_type_82598 - Determines media type
309 * @hw: pointer to hardware structure 275 * @hw: pointer to hardware structure
310 * 276 *
@@ -357,6 +323,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
357 u32 fctrl_reg; 323 u32 fctrl_reg;
358 u32 rmcs_reg; 324 u32 rmcs_reg;
359 u32 reg; 325 u32 reg;
326 u32 rx_pba_size;
360 u32 link_speed = 0; 327 u32 link_speed = 0;
361 bool link_up; 328 bool link_up;
362 329
@@ -459,16 +426,18 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
459 426
460 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 427 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
461 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 428 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
462 if (hw->fc.send_xon) { 429 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
463 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 430 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
464 (hw->fc.low_water | IXGBE_FCRTL_XONE)); 431
465 } else { 432 reg = (rx_pba_size - hw->fc.low_water) << 6;
466 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 433 if (hw->fc.send_xon)
467 hw->fc.low_water); 434 reg |= IXGBE_FCRTL_XONE;
468 } 435 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
436
437 reg = (rx_pba_size - hw->fc.high_water) << 10;
438 reg |= IXGBE_FCRTH_FCEN;
469 439
470 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), 440 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
471 (hw->fc.high_water | IXGBE_FCRTH_FCEN));
472 } 441 }
473 442
474 /* Configure pause time (2 TCs per register) */ 443 /* Configure pause time (2 TCs per register) */
@@ -1222,6 +1191,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1222static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1191static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1223 .init_params = &ixgbe_init_eeprom_params_generic, 1192 .init_params = &ixgbe_init_eeprom_params_generic,
1224 .read = &ixgbe_read_eerd_generic, 1193 .read = &ixgbe_read_eerd_generic,
1194 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
1225 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 1195 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
1226 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 1196 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
1227}; 1197};
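In the ixgbe_fc_enable_82598() hunk above, the XON/XOFF thresholds are no longer written straight from hw->fc.low_water/high_water (hence the IXGBE_DEFAULT_FCRTL/FCRTH constants dropped from ixgbe.h); they are now derived from the Rx packet-buffer size read back from RXPBSIZE for the traffic class, with the configured water marks subtracted and the enable bits OR'd in. A condensed sketch of that calculation, reusing the register and bit names from the hunk:

	u32 rx_pba_size, fcrtl, fcrth;

	/* packet-buffer size for this TC, scaled down by RXPBSIZE_SHIFT */
	rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
	rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;

	/* low (XON) threshold, optionally with XON frame generation enabled */
	fcrtl = (rx_pba_size - hw->fc.low_water) << 6;
	if (hw->fc.send_xon)
		fcrtl |= IXGBE_FCRTL_XONE;
	IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), fcrtl);

	/* high (XOFF) threshold with flow control enabled */
	fcrth = ((rx_pba_size - hw->fc.high_water) << 10) | IXGBE_FCRTH_FCEN;
	IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), fcrth);

The thresholds therefore track the actual packet-buffer allocation instead of assuming the fixed defaults the header previously exported.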
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 0bd8fbb5bfd..6827dddc383 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -56,9 +56,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
56 ixgbe_link_speed speed, 56 ixgbe_link_speed speed,
57 bool autoneg, 57 bool autoneg,
58 bool autoneg_wait_to_complete); 58 bool autoneg_wait_to_complete);
59static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
60 ixgbe_link_speed *speed,
61 bool *autoneg);
62static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 59static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
63 ixgbe_link_speed speed, 60 ixgbe_link_speed speed,
64 bool autoneg, 61 bool autoneg,
@@ -68,9 +65,9 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
68static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 65static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
69{ 66{
70 struct ixgbe_mac_info *mac = &hw->mac; 67 struct ixgbe_mac_info *mac = &hw->mac;
71 if (hw->phy.multispeed_fiber) { 68
72 /* Set up dual speed SFP+ support */ 69 /* enable the laser control functions for SFP+ fiber */
73 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; 70 if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
74 mac->ops.disable_tx_laser = 71 mac->ops.disable_tx_laser =
75 &ixgbe_disable_tx_laser_multispeed_fiber; 72 &ixgbe_disable_tx_laser_multispeed_fiber;
76 mac->ops.enable_tx_laser = 73 mac->ops.enable_tx_laser =
@@ -80,6 +77,12 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
80 mac->ops.disable_tx_laser = NULL; 77 mac->ops.disable_tx_laser = NULL;
81 mac->ops.enable_tx_laser = NULL; 78 mac->ops.enable_tx_laser = NULL;
82 mac->ops.flap_tx_laser = NULL; 79 mac->ops.flap_tx_laser = NULL;
80 }
81
82 if (hw->phy.multispeed_fiber) {
83 /* Set up dual speed SFP+ support */
84 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
85 } else {
83 if ((mac->ops.get_media_type(hw) == 86 if ((mac->ops.get_media_type(hw) ==
84 ixgbe_media_type_backplane) && 87 ixgbe_media_type_backplane) &&
85 (hw->phy.smart_speed == ixgbe_smart_speed_auto || 88 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -93,6 +96,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
93static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 96static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
94{ 97{
95 s32 ret_val = 0; 98 s32 ret_val = 0;
99 u32 reg_anlp1 = 0;
100 u32 i = 0;
96 u16 list_offset, data_offset, data_value; 101 u16 list_offset, data_offset, data_value;
97 102
98 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 103 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
@@ -119,14 +124,34 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
119 IXGBE_WRITE_FLUSH(hw); 124 IXGBE_WRITE_FLUSH(hw);
120 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 125 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
121 } 126 }
122 /* Now restart DSP by setting Restart_AN */
123 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
124 (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
125 127
126 /* Release the semaphore */ 128 /* Release the semaphore */
127 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 129 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
128 /* Delay obtaining semaphore again to allow FW access */ 130 /* Delay obtaining semaphore again to allow FW access */
129 msleep(hw->eeprom.semaphore_delay); 131 msleep(hw->eeprom.semaphore_delay);
132
133 /* Now restart DSP by setting Restart_AN and clearing LMS */
134 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
135 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
136 IXGBE_AUTOC_AN_RESTART));
137
138 /* Wait for AN to leave state 0 */
139 for (i = 0; i < 10; i++) {
140 msleep(4);
141 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
142 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
143 break;
144 }
145 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
146 hw_dbg(hw, "sfp module setup not complete\n");
147 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
148 goto setup_sfp_out;
149 }
150
151 /* Restart DSP by setting Restart_AN and return to SFI mode */
152 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
153 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
154 IXGBE_AUTOC_AN_RESTART));
130 } 155 }
131 156
132setup_sfp_out: 157setup_sfp_out:
@@ -174,7 +199,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
174 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 199 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
175 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; 200 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
176 mac->ops.get_link_capabilities = 201 mac->ops.get_link_capabilities =
177 &ixgbe_get_copper_link_capabilities_82599; 202 &ixgbe_get_copper_link_capabilities_generic;
178 } 203 }
179 204
180 /* Set necessary function pointers based on phy type */ 205 /* Set necessary function pointers based on phy type */
@@ -184,6 +209,10 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
184 phy->ops.get_firmware_version = 209 phy->ops.get_firmware_version =
185 &ixgbe_get_phy_firmware_version_tnx; 210 &ixgbe_get_phy_firmware_version_tnx;
186 break; 211 break;
212 case ixgbe_phy_aq:
213 phy->ops.get_firmware_version =
214 &ixgbe_get_phy_firmware_version_generic;
215 break;
187 default: 216 default:
188 break; 217 break;
189 } 218 }
@@ -290,37 +319,6 @@ out:
290} 319}
291 320
292/** 321/**
293 * ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities
294 * @hw: pointer to hardware structure
295 * @speed: pointer to link speed
296 * @autoneg: boolean auto-negotiation value
297 *
298 * Determines the link capabilities by reading the AUTOC register.
299 **/
300static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
301 ixgbe_link_speed *speed,
302 bool *autoneg)
303{
304 s32 status = IXGBE_ERR_LINK_SETUP;
305 u16 speed_ability;
306
307 *speed = 0;
308 *autoneg = true;
309
310 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
311 &speed_ability);
312
313 if (status == 0) {
314 if (speed_ability & MDIO_SPEED_10G)
315 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
316 if (speed_ability & MDIO_PMA_SPEED_1000)
317 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
318 }
319
320 return status;
321}
322
323/**
324 * ixgbe_get_media_type_82599 - Get media type 322 * ixgbe_get_media_type_82599 - Get media type
325 * @hw: pointer to hardware structure 323 * @hw: pointer to hardware structure
326 * 324 *
@@ -332,7 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
332 330
333 /* Detect if there is a copper PHY attached. */ 331 /* Detect if there is a copper PHY attached. */
334 if (hw->phy.type == ixgbe_phy_cu_unknown || 332 if (hw->phy.type == ixgbe_phy_cu_unknown ||
335 hw->phy.type == ixgbe_phy_tn) { 333 hw->phy.type == ixgbe_phy_tn ||
334 hw->phy.type == ixgbe_phy_aq) {
336 media_type = ixgbe_media_type_copper; 335 media_type = ixgbe_media_type_copper;
337 goto out; 336 goto out;
338 } 337 }
@@ -342,11 +341,13 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
342 case IXGBE_DEV_ID_82599_KX4_MEZZ: 341 case IXGBE_DEV_ID_82599_KX4_MEZZ:
343 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 342 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
344 case IXGBE_DEV_ID_82599_KR: 343 case IXGBE_DEV_ID_82599_KR:
344 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
345 case IXGBE_DEV_ID_82599_XAUI_LOM: 345 case IXGBE_DEV_ID_82599_XAUI_LOM:
346 /* Default device ID is mezzanine card KX/KX4 */ 346 /* Default device ID is mezzanine card KX/KX4 */
347 media_type = ixgbe_media_type_backplane; 347 media_type = ixgbe_media_type_backplane;
348 break; 348 break;
349 case IXGBE_DEV_ID_82599_SFP: 349 case IXGBE_DEV_ID_82599_SFP:
350 case IXGBE_DEV_ID_82599_SFP_FCOE:
350 case IXGBE_DEV_ID_82599_SFP_EM: 351 case IXGBE_DEV_ID_82599_SFP_EM:
351 media_type = ixgbe_media_type_fiber; 352 media_type = ixgbe_media_type_fiber;
352 break; 353 break;
@@ -1924,6 +1925,7 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1924 hw->phy.ops.identify(hw); 1925 hw->phy.ops.identify(hw);
1925 1926
1926 if (hw->phy.type == ixgbe_phy_tn || 1927 if (hw->phy.type == ixgbe_phy_tn ||
1928 hw->phy.type == ixgbe_phy_aq ||
1927 hw->phy.type == ixgbe_phy_cu_unknown) { 1929 hw->phy.type == ixgbe_phy_cu_unknown) {
1928 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, 1930 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
1929 &ext_ability); 1931 &ext_ability);
@@ -2125,51 +2127,6 @@ fw_version_out:
2125 return status; 2127 return status;
2126} 2128}
2127 2129
2128/**
2129 * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
2130 * the EEPROM
2131 * @hw: pointer to hardware structure
2132 * @wwnn_prefix: the alternative WWNN prefix
2133 * @wwpn_prefix: the alternative WWPN prefix
2134 *
2135 * This function will read the EEPROM from the alternative SAN MAC address
2136 * block to check the support for the alternative WWNN/WWPN prefix support.
2137 **/
2138static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2139 u16 *wwpn_prefix)
2140{
2141 u16 offset, caps;
2142 u16 alt_san_mac_blk_offset;
2143
2144 /* clear output first */
2145 *wwnn_prefix = 0xFFFF;
2146 *wwpn_prefix = 0xFFFF;
2147
2148 /* check if alternative SAN MAC is supported */
2149 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
2150 &alt_san_mac_blk_offset);
2151
2152 if ((alt_san_mac_blk_offset == 0) ||
2153 (alt_san_mac_blk_offset == 0xFFFF))
2154 goto wwn_prefix_out;
2155
2156 /* check capability in alternative san mac address block */
2157 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
2158 hw->eeprom.ops.read(hw, offset, &caps);
2159 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2160 goto wwn_prefix_out;
2161
2162 /* get the corresponding prefix for WWNN/WWPN */
2163 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
2164 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
2165
2166 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
2167 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
2168
2169wwn_prefix_out:
2170 return 0;
2171}
2172
2173static struct ixgbe_mac_operations mac_ops_82599 = { 2130static struct ixgbe_mac_operations mac_ops_82599 = {
2174 .init_hw = &ixgbe_init_hw_generic, 2131 .init_hw = &ixgbe_init_hw_generic,
2175 .reset_hw = &ixgbe_reset_hw_82599, 2132 .reset_hw = &ixgbe_reset_hw_82599,
@@ -2181,7 +2138,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2181 .get_mac_addr = &ixgbe_get_mac_addr_generic, 2138 .get_mac_addr = &ixgbe_get_mac_addr_generic,
2182 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 2139 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
2183 .get_device_caps = &ixgbe_get_device_caps_82599, 2140 .get_device_caps = &ixgbe_get_device_caps_82599,
2184 .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599, 2141 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
2185 .stop_adapter = &ixgbe_stop_adapter_generic, 2142 .stop_adapter = &ixgbe_stop_adapter_generic,
2186 .get_bus_info = &ixgbe_get_bus_info_generic, 2143 .get_bus_info = &ixgbe_get_bus_info_generic,
2187 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, 2144 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
@@ -2214,6 +2171,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2214 .init_params = &ixgbe_init_eeprom_params_generic, 2171 .init_params = &ixgbe_init_eeprom_params_generic,
2215 .read = &ixgbe_read_eerd_generic, 2172 .read = &ixgbe_read_eerd_generic,
2216 .write = &ixgbe_write_eeprom_generic, 2173 .write = &ixgbe_write_eeprom_generic,
2174 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
2217 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 2175 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
2218 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 2176 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
2219}; 2177};
@@ -2240,5 +2198,5 @@ struct ixgbe_info ixgbe_82599_info = {
2240 .mac_ops = &mac_ops_82599, 2198 .mac_ops = &mac_ops_82599,
2241 .eeprom_ops = &eeprom_ops_82599, 2199 .eeprom_ops = &eeprom_ops_82599,
2242 .phy_ops = &phy_ops_82599, 2200 .phy_ops = &phy_ops_82599,
2243 .mbx_ops = &mbx_ops_82599, 2201 .mbx_ops = &mbx_ops_generic,
2244}; 2202};
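The ops-table churn in this file (mbx_ops_generic, get_wwn_prefix_generic, the new .calc_checksum hook in eeprom_ops) all follows one pattern: shared "generic" helpers are wired into per-MAC function-pointer tables so callers never name a device-specific routine and duplicated per-MAC copies can be deleted. A standalone sketch of that dispatch, with made-up struct names rather than the driver's real ixgbe_hw layout:

#include <stdint.h>
#include <stdio.h>

struct hw;                                 /* opaque device handle */

struct eeprom_ops {
        int      (*read)(struct hw *hw, uint16_t offset, uint16_t *data);
        uint16_t (*calc_checksum)(struct hw *hw);
};

struct hw {
        const struct eeprom_ops *eeprom;
        uint16_t shadow[8];                /* fake EEPROM words */
};

static int fake_read(struct hw *hw, uint16_t offset, uint16_t *data)
{
        *data = hw->shadow[offset & 7];
        return 0;
}

/* a "generic" helper: only talks to the hardware through the ops table */
static uint16_t calc_checksum_generic(struct hw *hw)
{
        uint16_t sum = 0, word;

        for (uint16_t i = 0; i < 8; i++) {
                hw->eeprom->read(hw, i, &word);
                sum += word;
        }
        return (uint16_t)(0xBABA - sum);   /* ixgbe-style checksum complement */
}

/* one table per MAC family; several families can point at the same helpers */
static const struct eeprom_ops eeprom_ops_example = {
        .read          = fake_read,
        .calc_checksum = calc_checksum_generic,
};

int main(void)
{
        struct hw hw = { .eeprom = &eeprom_ops_example,
                         .shadow = { 1, 2, 3, 4, 5, 6, 7, 8 } };

        /* callers dispatch through the table, never a device-specific name */
        printf("checksum word: 0x%04x\n", hw.eeprom->calc_checksum(&hw));
        return 0;
}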
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index e3eca131638..cc11e422ce9 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -45,14 +45,12 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
45static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 45static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
49 48
50static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); 49static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
51static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); 50static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
52static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); 52static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
54static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); 53static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
55static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
56 54
57/** 55/**
58 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 56 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -198,30 +196,110 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
198} 196}
199 197
200/** 198/**
201 * ixgbe_read_pba_num_generic - Reads part number from EEPROM 199 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
202 * @hw: pointer to hardware structure 200 * @hw: pointer to hardware structure
203 * @pba_num: stores the part number from the EEPROM 201 * @pba_num: stores the part number string from the EEPROM
202 * @pba_num_size: part number string buffer length
204 * 203 *
205 * Reads the part number from the EEPROM. 204 * Reads the part number string from the EEPROM.
206 **/ 205 **/
207s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) 206s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
207 u32 pba_num_size)
208{ 208{
209 s32 ret_val; 209 s32 ret_val;
210 u16 data; 210 u16 data;
211 u16 pba_ptr;
212 u16 offset;
213 u16 length;
214
215 if (pba_num == NULL) {
216 hw_dbg(hw, "PBA string buffer was null\n");
217 return IXGBE_ERR_INVALID_ARGUMENT;
218 }
211 219
212 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 220 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
213 if (ret_val) { 221 if (ret_val) {
214 hw_dbg(hw, "NVM Read Error\n"); 222 hw_dbg(hw, "NVM Read Error\n");
215 return ret_val; 223 return ret_val;
216 } 224 }
217 *pba_num = (u32)(data << 16);
218 225
219 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); 226 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
220 if (ret_val) { 227 if (ret_val) {
221 hw_dbg(hw, "NVM Read Error\n"); 228 hw_dbg(hw, "NVM Read Error\n");
222 return ret_val; 229 return ret_val;
223 } 230 }
224 *pba_num |= data; 231
232 /*
 233 * If the first word is not the pointer-guard value, the PBA is stored in
 234 * the legacy format: pba_ptr is then the second data word of the PBA
 235 * number, and the two words can be decoded into an ASCII string.
236 */
237 if (data != IXGBE_PBANUM_PTR_GUARD) {
238 hw_dbg(hw, "NVM PBA number is not stored as string\n");
239
240 /* we will need 11 characters to store the PBA */
241 if (pba_num_size < 11) {
242 hw_dbg(hw, "PBA string buffer too small\n");
243 return IXGBE_ERR_NO_SPACE;
244 }
245
246 /* extract hex string from data and pba_ptr */
247 pba_num[0] = (data >> 12) & 0xF;
248 pba_num[1] = (data >> 8) & 0xF;
249 pba_num[2] = (data >> 4) & 0xF;
250 pba_num[3] = data & 0xF;
251 pba_num[4] = (pba_ptr >> 12) & 0xF;
252 pba_num[5] = (pba_ptr >> 8) & 0xF;
253 pba_num[6] = '-';
254 pba_num[7] = 0;
255 pba_num[8] = (pba_ptr >> 4) & 0xF;
256 pba_num[9] = pba_ptr & 0xF;
257
258 /* put a null character on the end of our string */
259 pba_num[10] = '\0';
260
261 /* switch all the data but the '-' to hex char */
262 for (offset = 0; offset < 10; offset++) {
263 if (pba_num[offset] < 0xA)
264 pba_num[offset] += '0';
265 else if (pba_num[offset] < 0x10)
266 pba_num[offset] += 'A' - 0xA;
267 }
268
269 return 0;
270 }
271
272 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
273 if (ret_val) {
274 hw_dbg(hw, "NVM Read Error\n");
275 return ret_val;
276 }
277
278 if (length == 0xFFFF || length == 0) {
279 hw_dbg(hw, "NVM PBA number section invalid length\n");
280 return IXGBE_ERR_PBA_SECTION;
281 }
282
283 /* check if pba_num buffer is big enough */
284 if (pba_num_size < (((u32)length * 2) - 1)) {
285 hw_dbg(hw, "PBA string buffer too small\n");
286 return IXGBE_ERR_NO_SPACE;
287 }
288
289 /* trim pba length from start of string */
290 pba_ptr++;
291 length--;
292
293 for (offset = 0; offset < length; offset++) {
294 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
295 if (ret_val) {
296 hw_dbg(hw, "NVM Read Error\n");
297 return ret_val;
298 }
299 pba_num[offset * 2] = (u8)(data >> 8);
300 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
301 }
302 pba_num[offset * 2] = '\0';
225 303
226 return 0; 304 return 0;
227} 305}
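For reference, the legacy branch of ixgbe_read_pba_string_generic() can be exercised on its own: the two 16-bit EEPROM words are split into nibbles, a '-' separator and a literal '0' are inserted, and every nibble is converted to its ASCII hex digit, yielding the 10-character "XXXXXX-0XX" form plus a terminator. A minimal userspace sketch of just that decoding (the EEPROM words below are made up):

#include <stdint.h>
#include <stdio.h>

static void decode_legacy_pba(uint16_t data, uint16_t pba_ptr, char pba_num[11])
{
        /* split both words into nibbles, insert '-' and the fixed '0' */
        pba_num[0] = (data >> 12) & 0xF;
        pba_num[1] = (data >> 8) & 0xF;
        pba_num[2] = (data >> 4) & 0xF;
        pba_num[3] = data & 0xF;
        pba_num[4] = (pba_ptr >> 12) & 0xF;
        pba_num[5] = (pba_ptr >> 8) & 0xF;
        pba_num[6] = '-';
        pba_num[7] = 0;                    /* becomes the digit '0' below */
        pba_num[8] = (pba_ptr >> 4) & 0xF;
        pba_num[9] = pba_ptr & 0xF;
        pba_num[10] = '\0';

        /* convert every nibble (but not the '-') to its hex character */
        for (int i = 0; i < 10; i++) {
                if (pba_num[i] == '-')
                        continue;
                if (pba_num[i] < 0xA)
                        pba_num[i] += '0';
                else
                        pba_num[i] += 'A' - 0xA;
        }
}

int main(void)
{
        char pba[11];

        decode_legacy_pba(0xE123, 0xA845, pba);   /* made-up EEPROM words */
        printf("legacy PBA: %s\n", pba);          /* prints "E123A8-045" */
        return 0;
}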
@@ -638,7 +716,7 @@ out:
638 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 716 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
639 * read or write is done respectively. 717 * read or write is done respectively.
640 **/ 718 **/
641static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) 719s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
642{ 720{
643 u32 i; 721 u32 i;
644 u32 reg; 722 u32 reg;
@@ -1009,7 +1087,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1009 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 1087 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
1010 * @hw: pointer to hardware structure 1088 * @hw: pointer to hardware structure
1011 **/ 1089 **/
1012static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) 1090u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1013{ 1091{
1014 u16 i; 1092 u16 i;
1015 u16 j; 1093 u16 j;
@@ -1072,7 +1150,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1072 status = hw->eeprom.ops.read(hw, 0, &checksum); 1150 status = hw->eeprom.ops.read(hw, 0, &checksum);
1073 1151
1074 if (status == 0) { 1152 if (status == 0) {
1075 checksum = ixgbe_calc_eeprom_checksum(hw); 1153 checksum = hw->eeprom.ops.calc_checksum(hw);
1076 1154
1077 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); 1155 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1078 1156
@@ -1110,7 +1188,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1110 status = hw->eeprom.ops.read(hw, 0, &checksum); 1188 status = hw->eeprom.ops.read(hw, 0, &checksum);
1111 1189
1112 if (status == 0) { 1190 if (status == 0) {
1113 checksum = ixgbe_calc_eeprom_checksum(hw); 1191 checksum = hw->eeprom.ops.calc_checksum(hw);
1114 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, 1192 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1115 checksum); 1193 checksum);
1116 } else { 1194 } else {
@@ -1595,6 +1673,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1595 u32 mflcn_reg, fccfg_reg; 1673 u32 mflcn_reg, fccfg_reg;
1596 u32 reg; 1674 u32 reg;
1597 u32 rx_pba_size; 1675 u32 rx_pba_size;
1676 u32 fcrtl, fcrth;
1598 1677
1599#ifdef CONFIG_DCB 1678#ifdef CONFIG_DCB
1600 if (hw->fc.requested_mode == ixgbe_fc_pfc) 1679 if (hw->fc.requested_mode == ixgbe_fc_pfc)
@@ -1671,41 +1750,21 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1671 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 1750 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
1672 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 1751 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
1673 1752
1674 reg = IXGBE_READ_REG(hw, IXGBE_MTQC); 1753 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1675 /* Thresholds are different for link flow control when in DCB mode */ 1754 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
1676 if (reg & IXGBE_MTQC_RT_ENA) {
1677 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1678 1755
1679 /* Always disable XON for LFC when in DCB mode */ 1756 fcrth = (rx_pba_size - hw->fc.high_water) << 10;
1680 reg = (rx_pba_size >> 5) & 0xFFE0; 1757 fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
1681 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
1682 1758
1683 reg = (rx_pba_size >> 2) & 0xFFE0; 1759 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
1684 if (hw->fc.current_mode & ixgbe_fc_tx_pause) 1760 fcrth |= IXGBE_FCRTH_FCEN;
1685 reg |= IXGBE_FCRTH_FCEN; 1761 if (hw->fc.send_xon)
1686 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg); 1762 fcrtl |= IXGBE_FCRTL_XONE;
1687 } else {
1688 /*
1689 * Set up and enable Rx high/low water mark thresholds,
1690 * enable XON.
1691 */
1692 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
1693 if (hw->fc.send_xon) {
1694 IXGBE_WRITE_REG(hw,
1695 IXGBE_FCRTL_82599(packetbuf_num),
1696 (hw->fc.low_water |
1697 IXGBE_FCRTL_XONE));
1698 } else {
1699 IXGBE_WRITE_REG(hw,
1700 IXGBE_FCRTL_82599(packetbuf_num),
1701 hw->fc.low_water);
1702 }
1703
1704 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
1705 (hw->fc.high_water | IXGBE_FCRTH_FCEN));
1706 }
1707 } 1763 }
1708 1764
1765 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
1766 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
1767
1709 /* Configure pause time (2 TCs per register) */ 1768 /* Configure pause time (2 TCs per register) */
1710 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); 1769 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
1711 if ((packetbuf_num & 1) == 0) 1770 if ((packetbuf_num & 1) == 0)
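The rewritten block above collapses the DCB/non-DCB special cases into one formula: read the Rx packet-buffer size, subtract the configured high/low water headroom, shift back to byte units, and OR in the enable bits when Tx pause is active. A hedged standalone sketch of that arithmetic (bit positions and the water-mark units are illustrative; the driver derives its real water marks elsewhere):

#include <stdint.h>
#include <stdio.h>

#define RXPBSIZE_SHIFT 10            /* KB granularity of the RXPBSIZE field */
#define FCRTH_FCEN     0x80000000u   /* assumed: enable bit in the high-water reg */
#define FCRTL_XONE     0x80000000u   /* assumed: XON-enable bit in the low-water reg */

struct fc_cfg {
        uint32_t high_water;         /* KB of headroom before XOFF is sent */
        uint32_t low_water;          /* KB of headroom before XON is sent */
        int tx_pause;
        int send_xon;
};

static void calc_fc_thresholds(uint32_t rxpbsize_reg, const struct fc_cfg *fc,
                               uint32_t *fcrth, uint32_t *fcrtl)
{
        uint32_t rx_pba_kb = rxpbsize_reg >> RXPBSIZE_SHIFT;

        /* headroom below the top of the packet buffer, converted to bytes */
        *fcrth = (rx_pba_kb - fc->high_water) << 10;
        *fcrtl = (rx_pba_kb - fc->low_water) << 10;

        if (fc->tx_pause) {
                *fcrth |= FCRTH_FCEN;
                if (fc->send_xon)
                        *fcrtl |= FCRTL_XONE;
        }
}

int main(void)
{
        struct fc_cfg fc = { .high_water = 32, .low_water = 64,
                             .tx_pause = 1, .send_xon = 1 };
        uint32_t fcrth, fcrtl;

        /* e.g. a 512 KB packet buffer as encoded in the RXPBSIZE register */
        calc_fc_thresholds(512u << RXPBSIZE_SHIFT, &fc, &fcrth, &fcrtl);
        printf("FCRTH=0x%08x FCRTL=0x%08x\n", fcrth, fcrtl);
        return 0;
}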
@@ -2705,3 +2764,48 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2705 2764
2706 return 0; 2765 return 0;
2707} 2766}
2767
2768/**
 2769 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
2770 * the EEPROM
2771 * @hw: pointer to hardware structure
2772 * @wwnn_prefix: the alternative WWNN prefix
2773 * @wwpn_prefix: the alternative WWPN prefix
2774 *
2775 * This function will read the EEPROM from the alternative SAN MAC address
 2776 * block to check support for the alternative WWNN/WWPN prefix.
2777 **/
2778s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2779 u16 *wwpn_prefix)
2780{
2781 u16 offset, caps;
2782 u16 alt_san_mac_blk_offset;
2783
2784 /* clear output first */
2785 *wwnn_prefix = 0xFFFF;
2786 *wwpn_prefix = 0xFFFF;
2787
2788 /* check if alternative SAN MAC is supported */
2789 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
2790 &alt_san_mac_blk_offset);
2791
2792 if ((alt_san_mac_blk_offset == 0) ||
2793 (alt_san_mac_blk_offset == 0xFFFF))
2794 goto wwn_prefix_out;
2795
2796 /* check capability in alternative san mac address block */
2797 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
2798 hw->eeprom.ops.read(hw, offset, &caps);
2799 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2800 goto wwn_prefix_out;
2801
2802 /* get the corresponding prefix for WWNN/WWPN */
2803 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
2804 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
2805
2806 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
2807 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
2808
2809wwn_prefix_out:
2810 return 0;
2811}
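The EEPROM walk in ixgbe_get_wwn_prefix_generic() is short enough to model in userspace: follow the alternative-SAN-MAC block pointer, test the ALTWWN capability bit, then read the WWNN/WWPN prefix words, leaving 0xFFFF when anything is missing. A sketch against an in-memory word array (the offsets and capability bit are stand-ins for the IXGBE_ALT_SAN_MAC_* constants):

#include <stdint.h>
#include <stdio.h>

#define ALT_SAN_MAC_BLK_PTR   0x27   /* assumed pointer word in the EEPROM map */
#define CAPS_OFFSET           0x0
#define WWNN_OFFSET           0x1
#define WWPN_OFFSET           0x2
#define CAPS_ALTWWN           0x1

static uint16_t eeprom_read(const uint16_t *eeprom, uint16_t offset)
{
        return eeprom[offset];
}

static void get_wwn_prefix(const uint16_t *eeprom,
                           uint16_t *wwnn_prefix, uint16_t *wwpn_prefix)
{
        uint16_t blk, caps;

        /* defaults signal "no alternative prefix found" */
        *wwnn_prefix = 0xFFFF;
        *wwpn_prefix = 0xFFFF;

        blk = eeprom_read(eeprom, ALT_SAN_MAC_BLK_PTR);
        if (blk == 0 || blk == 0xFFFF)
                return;

        caps = eeprom_read(eeprom, blk + CAPS_OFFSET);
        if (!(caps & CAPS_ALTWWN))
                return;

        *wwnn_prefix = eeprom_read(eeprom, blk + WWNN_OFFSET);
        *wwpn_prefix = eeprom_read(eeprom, blk + WWPN_OFFSET);
}

int main(void)
{
        uint16_t eeprom[0x40] = { 0 };
        uint16_t wwnn, wwpn;

        eeprom[ALT_SAN_MAC_BLK_PTR] = 0x30;   /* block lives at word 0x30 */
        eeprom[0x30 + CAPS_OFFSET]  = CAPS_ALTWWN;
        eeprom[0x30 + WWNN_OFFSET]  = 0x2000;
        eeprom[0x30 + WWPN_OFFSET]  = 0x2001;

        get_wwn_prefix(eeprom, &wwnn, &wwpn);
        printf("WWNN prefix 0x%04x, WWPN prefix 0x%04x\n", wwnn, wwpn);
        return 0;
}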
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 424c223437d..e1f980a8a09 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -35,7 +35,8 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
35s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
36s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); 36s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
37s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); 37s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
38s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); 38s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
39 u32 pba_num_size);
39s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); 40s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
40s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); 41s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
41void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); 42void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -49,9 +50,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
49s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); 50s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
50s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 51s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
51 u16 *data); 52 u16 *data);
53u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
52s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 54s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
53 u16 *checksum_val); 55 u16 *checksum_val);
54s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); 56s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
57s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
55 58
56s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 59s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
57 u32 enable_addr); 60 u32 enable_addr);
@@ -81,7 +84,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
81s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, 84s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
82 ixgbe_link_speed *speed, 85 ixgbe_link_speed *speed,
83 bool *link_up, bool link_up_wait_to_complete); 86 bool *link_up, bool link_up_wait_to_complete);
84 87s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
88 u16 *wwpn_prefix);
85s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); 89s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
86s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); 90s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
87 91
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 0d44c6470ca..d16c260c1f5 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -42,7 +42,8 @@
42 * It should be called only after the rules are checked by 42 * It should be called only after the rules are checked by
43 * ixgbe_dcb_check_config(). 43 * ixgbe_dcb_check_config().
44 */ 44 */
45s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, 45s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
46 struct ixgbe_dcb_config *dcb_config,
46 int max_frame, u8 direction) 47 int max_frame, u8 direction)
47{ 48{
48 struct tc_bw_alloc *p; 49 struct tc_bw_alloc *p;
@@ -124,7 +125,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
124 * credit may not be enough to send out a TSO 125 * credit may not be enough to send out a TSO
125 * packet in descriptor plane arbitration. 126 * packet in descriptor plane arbitration.
126 */ 127 */
127 if (credit_max && 128 if ((hw->mac.type == ixgbe_mac_82598EB) &&
129 credit_max &&
128 (credit_max < MINIMUM_CREDIT_FOR_TSO)) 130 (credit_max < MINIMUM_CREDIT_FOR_TSO))
129 credit_max = MINIMUM_CREDIT_FOR_TSO; 131 credit_max = MINIMUM_CREDIT_FOR_TSO;
130 132
@@ -150,10 +152,17 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
150 struct ixgbe_dcb_config *dcb_config) 152 struct ixgbe_dcb_config *dcb_config)
151{ 153{
152 s32 ret = 0; 154 s32 ret = 0;
153 if (hw->mac.type == ixgbe_mac_82598EB) 155 switch (hw->mac.type) {
156 case ixgbe_mac_82598EB:
154 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); 157 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
155 else if (hw->mac.type == ixgbe_mac_82599EB) 158 break;
159 case ixgbe_mac_82599EB:
160 case ixgbe_mac_X540:
156 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config); 161 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
162 break;
163 default:
164 break;
165 }
157 return ret; 166 return ret;
158} 167}
159 168
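The extra hw argument to ixgbe_dcb_calculate_tc_credits() exists so the TSO minimum-credit bump can be limited to 82598, which still needs enough descriptor-plane credit for a full TSO burst. A compact sketch of just that clamp, with an assumed stand-in value for MINIMUM_CREDIT_FOR_TSO:

#include <stdio.h>

enum mac_type { MAC_82598EB, MAC_82599EB, MAC_X540 };

#define MINIMUM_CREDIT_FOR_TSO 65   /* assumed stand-in for the driver constant */

static int clamp_tc_credit_max(enum mac_type mac, int credit_max)
{
        /* only 82598 needs the bump; newer MACs handle this in hardware */
        if (mac == MAC_82598EB && credit_max &&
            credit_max < MINIMUM_CREDIT_FOR_TSO)
                credit_max = MINIMUM_CREDIT_FOR_TSO;
        return credit_max;
}

int main(void)
{
        printf("82598: %d\n", clamp_tc_credit_max(MAC_82598EB, 10));  /* bumped */
        printf("82599: %d\n", clamp_tc_credit_max(MAC_82599EB, 10));  /* untouched */
        return 0;
}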
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 0208a87b129..1cfe38ee164 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -150,7 +150,8 @@ struct ixgbe_dcb_config {
150/* DCB driver APIs */ 150/* DCB driver APIs */
151 151
152/* DCB credits calculation */ 152/* DCB credits calculation */
153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8); 153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
154 struct ixgbe_dcb_config *, int, u8);
154 155
155/* DCB hw initialization */ 156/* DCB hw initialization */
156s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); 157s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 50288bcadc5..9a5e89c12e0 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -256,21 +256,17 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
256 * for each traffic class. 256 * for each traffic class.
257 */ 257 */
258 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 258 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
259 if (dcb_config->rx_pba_cfg == pba_equal) { 259 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
260 rx_pba_size = IXGBE_RXPBSIZE_64KB; 260 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
261 } else { 261 reg = (rx_pba_size - hw->fc.low_water) << 10;
262 rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
263 : IXGBE_RXPBSIZE_48KB;
264 }
265 262
266 reg = ((rx_pba_size >> 5) & 0xFFF0);
267 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 263 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
268 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 264 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
269 reg |= IXGBE_FCRTL_XONE; 265 reg |= IXGBE_FCRTL_XONE;
270 266
271 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); 267 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
272 268
273 reg = ((rx_pba_size >> 2) & 0xFFF0); 269 reg = (rx_pba_size - hw->fc.high_water) << 10;
274 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 270 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
275 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 271 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
276 reg |= IXGBE_FCRTH_FCEN; 272 reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 05f22471507..374e1f74d0f 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -251,19 +251,17 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
251 251
252 /* Configure PFC Tx thresholds per TC */ 252 /* Configure PFC Tx thresholds per TC */
253 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 253 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
254 if (dcb_config->rx_pba_cfg == pba_equal) 254 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
255 rx_pba_size = IXGBE_RXPBSIZE_64KB; 255 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
256 else 256
257 rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB 257 reg = (rx_pba_size - hw->fc.low_water) << 10;
258 : IXGBE_RXPBSIZE_48KB;
259 258
260 reg = ((rx_pba_size >> 5) & 0xFFE0);
261 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 259 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
262 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) 260 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
263 reg |= IXGBE_FCRTL_XONE; 261 reg |= IXGBE_FCRTL_XONE;
264 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); 262 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
265 263
266 reg = ((rx_pba_size >> 2) & 0xFFE0); 264 reg = (rx_pba_size - hw->fc.high_water) << 10;
267 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 265 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
268 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) 266 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
269 reg |= IXGBE_FCRTH_FCEN; 267 reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index b53b465e24a..bf566e8a455 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -130,15 +130,21 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
130 netdev->netdev_ops->ndo_stop(netdev); 130 netdev->netdev_ops->ndo_stop(netdev);
131 ixgbe_clear_interrupt_scheme(adapter); 131 ixgbe_clear_interrupt_scheme(adapter);
132 132
133 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 133 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
134 switch (adapter->hw.mac.type) {
135 case ixgbe_mac_82598EB:
134 adapter->last_lfc_mode = adapter->hw.fc.current_mode; 136 adapter->last_lfc_mode = adapter->hw.fc.current_mode;
135 adapter->hw.fc.requested_mode = ixgbe_fc_none; 137 adapter->hw.fc.requested_mode = ixgbe_fc_none;
136 } 138 break;
137 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 139 case ixgbe_mac_82599EB:
138 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 140 case ixgbe_mac_X540:
139 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 141 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
140 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 142 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
143 break;
144 default:
145 break;
141 } 146 }
147
142 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; 148 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
143 ixgbe_init_interrupt_scheme(adapter); 149 ixgbe_init_interrupt_scheme(adapter);
144 if (netif_running(netdev)) 150 if (netif_running(netdev))
@@ -155,8 +161,14 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
155 adapter->dcb_cfg.pfc_mode_enable = false; 161 adapter->dcb_cfg.pfc_mode_enable = false;
156 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 162 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
157 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 163 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
158 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 164 switch (adapter->hw.mac.type) {
165 case ixgbe_mac_82599EB:
166 case ixgbe_mac_X540:
159 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 167 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
168 break;
169 default:
170 break;
171 }
160 172
161 ixgbe_init_interrupt_scheme(adapter); 173 ixgbe_init_interrupt_scheme(adapter);
162 if (netif_running(netdev)) 174 if (netif_running(netdev))
@@ -178,9 +190,14 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
178 for (i = 0; i < netdev->addr_len; i++) 190 for (i = 0; i < netdev->addr_len; i++)
179 perm_addr[i] = adapter->hw.mac.perm_addr[i]; 191 perm_addr[i] = adapter->hw.mac.perm_addr[i];
180 192
181 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 193 switch (adapter->hw.mac.type) {
194 case ixgbe_mac_82599EB:
195 case ixgbe_mac_X540:
182 for (j = 0; j < netdev->addr_len; j++, i++) 196 for (j = 0; j < netdev->addr_len; j++, i++)
183 perm_addr[i] = adapter->hw.mac.san_addr[j]; 197 perm_addr[i] = adapter->hw.mac.san_addr[j];
198 break;
199 default:
200 break;
184 } 201 }
185} 202}
186 203
@@ -366,15 +383,29 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
366 } 383 }
367 384
368 if (adapter->dcb_cfg.pfc_mode_enable) { 385 if (adapter->dcb_cfg.pfc_mode_enable) {
369 if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && 386 switch (adapter->hw.mac.type) {
370 (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) 387 case ixgbe_mac_82599EB:
371 adapter->last_lfc_mode = adapter->hw.fc.current_mode; 388 case ixgbe_mac_X540:
389 if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
390 adapter->last_lfc_mode =
391 adapter->hw.fc.current_mode;
392 break;
393 default:
394 break;
395 }
372 adapter->hw.fc.requested_mode = ixgbe_fc_pfc; 396 adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
373 } else { 397 } else {
374 if (adapter->hw.mac.type != ixgbe_mac_82598EB) 398 switch (adapter->hw.mac.type) {
375 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; 399 case ixgbe_mac_82598EB:
376 else
377 adapter->hw.fc.requested_mode = ixgbe_fc_none; 400 adapter->hw.fc.requested_mode = ixgbe_fc_none;
401 break;
402 case ixgbe_mac_82599EB:
403 case ixgbe_mac_X540:
404 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
405 break;
406 default:
407 break;
408 }
378 } 409 }
379 410
380 if (adapter->dcb_set_bitmap & BIT_RESETLINK) { 411 if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3dc731c22ff..ef3f9105a05 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -185,6 +185,16 @@ static int ixgbe_get_settings(struct net_device *netdev,
185 ADVERTISED_FIBRE); 185 ADVERTISED_FIBRE);
186 ecmd->port = PORT_FIBRE; 186 ecmd->port = PORT_FIBRE;
187 ecmd->autoneg = AUTONEG_DISABLE; 187 ecmd->autoneg = AUTONEG_DISABLE;
188 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
189 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
190 ecmd->supported |= (SUPPORTED_1000baseT_Full |
191 SUPPORTED_Autoneg |
192 SUPPORTED_FIBRE);
193 ecmd->advertising = (ADVERTISED_10000baseT_Full |
194 ADVERTISED_1000baseT_Full |
195 ADVERTISED_Autoneg |
196 ADVERTISED_FIBRE);
197 ecmd->port = PORT_FIBRE;
188 } else { 198 } else {
189 ecmd->supported |= (SUPPORTED_1000baseT_Full | 199 ecmd->supported |= (SUPPORTED_1000baseT_Full |
190 SUPPORTED_FIBRE); 200 SUPPORTED_FIBRE);
@@ -204,6 +214,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
204 /* Get PHY type */ 214 /* Get PHY type */
205 switch (adapter->hw.phy.type) { 215 switch (adapter->hw.phy.type) {
206 case ixgbe_phy_tn: 216 case ixgbe_phy_tn:
217 case ixgbe_phy_aq:
207 case ixgbe_phy_cu_unknown: 218 case ixgbe_phy_cu_unknown:
208 /* Copper 10G-BASET */ 219 /* Copper 10G-BASET */
209 ecmd->port = PORT_TP; 220 ecmd->port = PORT_TP;
@@ -332,13 +343,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
332 else 343 else
333 pause->autoneg = 1; 344 pause->autoneg = 1;
334 345
335#ifdef CONFIG_DCB
336 if (hw->fc.current_mode == ixgbe_fc_pfc) {
337 pause->rx_pause = 0;
338 pause->tx_pause = 0;
339 }
340
341#endif
342 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 346 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
343 pause->rx_pause = 1; 347 pause->rx_pause = 1;
344 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { 348 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -346,6 +350,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
346 } else if (hw->fc.current_mode == ixgbe_fc_full) { 350 } else if (hw->fc.current_mode == ixgbe_fc_full) {
347 pause->rx_pause = 1; 351 pause->rx_pause = 1;
348 pause->tx_pause = 1; 352 pause->tx_pause = 1;
353#ifdef CONFIG_DCB
354 } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
355 pause->rx_pause = 0;
356 pause->tx_pause = 0;
357#endif
349 } 358 }
350} 359}
351 360
@@ -363,7 +372,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
363 return -EINVAL; 372 return -EINVAL;
364 373
365#endif 374#endif
366
367 fc = hw->fc; 375 fc = hw->fc;
368 376
369 if (pause->autoneg != AUTONEG_ENABLE) 377 if (pause->autoneg != AUTONEG_ENABLE)
@@ -412,11 +420,6 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
412 else 420 else
413 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; 421 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
414 422
415 if (netif_running(netdev))
416 ixgbe_reinit_locked(adapter);
417 else
418 ixgbe_reset(adapter);
419
420 return 0; 423 return 0;
421} 424}
422 425
@@ -428,16 +431,21 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev)
428static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) 431static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
429{ 432{
430 struct ixgbe_adapter *adapter = netdev_priv(netdev); 433 struct ixgbe_adapter *adapter = netdev_priv(netdev);
434 u32 feature_list;
431 435
432 if (data) { 436 feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
433 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 437 switch (adapter->hw.mac.type) {
434 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 438 case ixgbe_mac_82599EB:
435 netdev->features |= NETIF_F_SCTP_CSUM; 439 case ixgbe_mac_X540:
436 } else { 440 feature_list |= NETIF_F_SCTP_CSUM;
437 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 441 break;
438 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 442 default:
439 netdev->features &= ~NETIF_F_SCTP_CSUM; 443 break;
440 } 444 }
445 if (data)
446 netdev->features |= feature_list;
447 else
448 netdev->features &= ~feature_list;
441 449
442 return 0; 450 return 0;
443} 451}
@@ -530,10 +538,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
530 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); 538 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
531 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); 539 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
532 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); 540 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
533 for (i = 0; i < 8; i++) 541 for (i = 0; i < 8; i++) {
534 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); 542 switch (hw->mac.type) {
535 for (i = 0; i < 8; i++) 543 case ixgbe_mac_82598EB:
536 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); 544 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
545 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
546 break;
547 case ixgbe_mac_82599EB:
548 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
549 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
550 break;
551 default:
552 break;
553 }
554 }
537 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); 555 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
538 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); 556 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
539 557
@@ -615,6 +633,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
615 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 633 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
616 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); 634 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
617 635
636 /* DCB */
618 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 637 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
619 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 638 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
620 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 639 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -905,13 +924,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
905 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 924 memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
906 sizeof(struct ixgbe_ring)); 925 sizeof(struct ixgbe_ring));
907 temp_tx_ring[i].count = new_tx_count; 926 temp_tx_ring[i].count = new_tx_count;
908 err = ixgbe_setup_tx_resources(adapter, 927 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
909 &temp_tx_ring[i]);
910 if (err) { 928 if (err) {
911 while (i) { 929 while (i) {
912 i--; 930 i--;
913 ixgbe_free_tx_resources(adapter, 931 ixgbe_free_tx_resources(&temp_tx_ring[i]);
914 &temp_tx_ring[i]);
915 } 932 }
916 goto clear_reset; 933 goto clear_reset;
917 } 934 }
@@ -930,13 +947,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
930 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 947 memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
931 sizeof(struct ixgbe_ring)); 948 sizeof(struct ixgbe_ring));
932 temp_rx_ring[i].count = new_rx_count; 949 temp_rx_ring[i].count = new_rx_count;
933 err = ixgbe_setup_rx_resources(adapter, 950 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
934 &temp_rx_ring[i]);
935 if (err) { 951 if (err) {
936 while (i) { 952 while (i) {
937 i--; 953 i--;
938 ixgbe_free_rx_resources(adapter, 954 ixgbe_free_rx_resources(&temp_rx_ring[i]);
939 &temp_rx_ring[i]);
940 } 955 }
941 goto err_setup; 956 goto err_setup;
942 } 957 }
@@ -951,8 +966,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
951 /* tx */ 966 /* tx */
952 if (new_tx_count != adapter->tx_ring_count) { 967 if (new_tx_count != adapter->tx_ring_count) {
953 for (i = 0; i < adapter->num_tx_queues; i++) { 968 for (i = 0; i < adapter->num_tx_queues; i++) {
954 ixgbe_free_tx_resources(adapter, 969 ixgbe_free_tx_resources(adapter->tx_ring[i]);
955 adapter->tx_ring[i]);
956 memcpy(adapter->tx_ring[i], &temp_tx_ring[i], 970 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
957 sizeof(struct ixgbe_ring)); 971 sizeof(struct ixgbe_ring));
958 } 972 }
@@ -962,8 +976,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
962 /* rx */ 976 /* rx */
963 if (new_rx_count != adapter->rx_ring_count) { 977 if (new_rx_count != adapter->rx_ring_count) {
964 for (i = 0; i < adapter->num_rx_queues; i++) { 978 for (i = 0; i < adapter->num_rx_queues; i++) {
965 ixgbe_free_rx_resources(adapter, 979 ixgbe_free_rx_resources(adapter->rx_ring[i]);
966 adapter->rx_ring[i]);
967 memcpy(adapter->rx_ring[i], &temp_rx_ring[i], 980 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
968 sizeof(struct ixgbe_ring)); 981 sizeof(struct ixgbe_ring));
969 } 982 }
@@ -1237,12 +1250,20 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1237 u32 value, before, after; 1250 u32 value, before, after;
1238 u32 i, toggle; 1251 u32 i, toggle;
1239 1252
1240 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1253 switch (adapter->hw.mac.type) {
1241 toggle = 0x7FFFF30F; 1254 case ixgbe_mac_82598EB:
1242 test = reg_test_82599;
1243 } else {
1244 toggle = 0x7FFFF3FF; 1255 toggle = 0x7FFFF3FF;
1245 test = reg_test_82598; 1256 test = reg_test_82598;
1257 break;
1258 case ixgbe_mac_82599EB:
1259 case ixgbe_mac_X540:
1260 toggle = 0x7FFFF30F;
1261 test = reg_test_82599;
1262 break;
1263 default:
1264 *data = 1;
1265 return 1;
1266 break;
1246 } 1267 }
1247 1268
1248 /* 1269 /*
@@ -1460,16 +1481,21 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1460 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1481 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1461 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); 1482 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1462 1483
1463 if (hw->mac.type == ixgbe_mac_82599EB) { 1484 switch (hw->mac.type) {
1485 case ixgbe_mac_82599EB:
1486 case ixgbe_mac_X540:
1464 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1487 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1465 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1488 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1466 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1489 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1490 break;
1491 default:
1492 break;
1467 } 1493 }
1468 1494
1469 ixgbe_reset(adapter); 1495 ixgbe_reset(adapter);
1470 1496
1471 ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring); 1497 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1472 ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring); 1498 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1473} 1499}
1474 1500
1475static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1501static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1483,17 +1509,24 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1483 /* Setup Tx descriptor ring and Tx buffers */ 1509 /* Setup Tx descriptor ring and Tx buffers */
1484 tx_ring->count = IXGBE_DEFAULT_TXD; 1510 tx_ring->count = IXGBE_DEFAULT_TXD;
1485 tx_ring->queue_index = 0; 1511 tx_ring->queue_index = 0;
1512 tx_ring->dev = &adapter->pdev->dev;
1513 tx_ring->netdev = adapter->netdev;
1486 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; 1514 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1487 tx_ring->numa_node = adapter->node; 1515 tx_ring->numa_node = adapter->node;
1488 1516
1489 err = ixgbe_setup_tx_resources(adapter, tx_ring); 1517 err = ixgbe_setup_tx_resources(tx_ring);
1490 if (err) 1518 if (err)
1491 return 1; 1519 return 1;
1492 1520
1493 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1521 switch (adapter->hw.mac.type) {
1522 case ixgbe_mac_82599EB:
1523 case ixgbe_mac_X540:
1494 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1524 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1495 reg_data |= IXGBE_DMATXCTL_TE; 1525 reg_data |= IXGBE_DMATXCTL_TE;
1496 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1526 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1527 break;
1528 default:
1529 break;
1497 } 1530 }
1498 1531
1499 ixgbe_configure_tx_ring(adapter, tx_ring); 1532 ixgbe_configure_tx_ring(adapter, tx_ring);
@@ -1501,11 +1534,13 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1501 /* Setup Rx Descriptor ring and Rx buffers */ 1534 /* Setup Rx Descriptor ring and Rx buffers */
1502 rx_ring->count = IXGBE_DEFAULT_RXD; 1535 rx_ring->count = IXGBE_DEFAULT_RXD;
1503 rx_ring->queue_index = 0; 1536 rx_ring->queue_index = 0;
1537 rx_ring->dev = &adapter->pdev->dev;
1538 rx_ring->netdev = adapter->netdev;
1504 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; 1539 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1505 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048; 1540 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
1506 rx_ring->numa_node = adapter->node; 1541 rx_ring->numa_node = adapter->node;
1507 1542
1508 err = ixgbe_setup_rx_resources(adapter, rx_ring); 1543 err = ixgbe_setup_rx_resources(rx_ring);
1509 if (err) { 1544 if (err) {
1510 ret_val = 4; 1545 ret_val = 4;
1511 goto err_nomem; 1546 goto err_nomem;
@@ -1604,8 +1639,7 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
1604 return 13; 1639 return 13;
1605} 1640}
1606 1641
1607static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter, 1642static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1608 struct ixgbe_ring *rx_ring,
1609 struct ixgbe_ring *tx_ring, 1643 struct ixgbe_ring *tx_ring,
1610 unsigned int size) 1644 unsigned int size)
1611{ 1645{
@@ -1627,7 +1661,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
1627 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; 1661 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1628 1662
1629 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ 1663 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
1630 dma_unmap_single(&adapter->pdev->dev, 1664 dma_unmap_single(rx_ring->dev,
1631 rx_buffer_info->dma, 1665 rx_buffer_info->dma,
1632 bufsz, 1666 bufsz,
1633 DMA_FROM_DEVICE); 1667 DMA_FROM_DEVICE);
@@ -1639,7 +1673,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
1639 1673
1640 /* unmap buffer on Tx side */ 1674 /* unmap buffer on Tx side */
1641 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; 1675 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1642 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 1676 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1643 1677
1644 /* increment Rx/Tx next to clean counters */ 1678 /* increment Rx/Tx next to clean counters */
1645 rx_ntc++; 1679 rx_ntc++;
@@ -1655,7 +1689,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
1655 } 1689 }
1656 1690
1657 /* re-map buffers to ring, store next to clean values */ 1691 /* re-map buffers to ring, store next to clean values */
1658 ixgbe_alloc_rx_buffers(adapter, rx_ring, count); 1692 ixgbe_alloc_rx_buffers(rx_ring, count);
1659 rx_ring->next_to_clean = rx_ntc; 1693 rx_ring->next_to_clean = rx_ntc;
1660 tx_ring->next_to_clean = tx_ntc; 1694 tx_ring->next_to_clean = tx_ntc;
1661 1695
@@ -1699,7 +1733,6 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1699 for (i = 0; i < 64; i++) { 1733 for (i = 0; i < 64; i++) {
1700 skb_get(skb); 1734 skb_get(skb);
1701 tx_ret_val = ixgbe_xmit_frame_ring(skb, 1735 tx_ret_val = ixgbe_xmit_frame_ring(skb,
1702 adapter->netdev,
1703 adapter, 1736 adapter,
1704 tx_ring); 1737 tx_ring);
1705 if (tx_ret_val == NETDEV_TX_OK) 1738 if (tx_ret_val == NETDEV_TX_OK)
@@ -1714,8 +1747,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1714 /* allow 200 milliseconds for packets to go from Tx to Rx */ 1747 /* allow 200 milliseconds for packets to go from Tx to Rx */
1715 msleep(200); 1748 msleep(200);
1716 1749
1717 good_cnt = ixgbe_clean_test_rings(adapter, rx_ring, 1750 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
1718 tx_ring, size);
1719 if (good_cnt != 64) { 1751 if (good_cnt != 64) {
1720 ret_val = 13; 1752 ret_val = 13;
1721 break; 1753 break;
@@ -1847,7 +1879,25 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1847 struct ixgbe_hw *hw = &adapter->hw; 1879 struct ixgbe_hw *hw = &adapter->hw;
1848 int retval = 1; 1880 int retval = 1;
1849 1881
1882 /* WOL not supported except for the following */
1850 switch(hw->device_id) { 1883 switch(hw->device_id) {
1884 case IXGBE_DEV_ID_82599_SFP:
1885 /* Only this subdevice supports WOL */
1886 if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
1887 wol->supported = 0;
1888 break;
1889 }
1890 retval = 0;
1891 break;
1892 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1893 /* All except this subdevice support WOL */
1894 if (hw->subsystem_device_id ==
1895 IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
1896 wol->supported = 0;
1897 break;
1898 }
1899 retval = 0;
1900 break;
1851 case IXGBE_DEV_ID_82599_KX4: 1901 case IXGBE_DEV_ID_82599_KX4:
1852 retval = 0; 1902 retval = 0;
1853 break; 1903 break;
@@ -1985,6 +2035,41 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
1985 return 0; 2035 return 0;
1986} 2036}
1987 2037
2038/*
2039 * this function must be called before setting the new value of
2040 * rx_itr_setting
2041 */
2042static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
2043 struct ethtool_coalesce *ec)
2044{
2045 struct net_device *netdev = adapter->netdev;
2046
2047 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
2048 return false;
2049
2050 /* if interrupt rate is too high then disable RSC */
2051 if (ec->rx_coalesce_usecs != 1 &&
2052 ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
2053 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2054 e_info(probe, "rx-usecs set too low, "
2055 "disabling RSC\n");
2056 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2057 return true;
2058 }
2059 } else {
2060 /* check the feature flag value and enable RSC if necessary */
2061 if ((netdev->features & NETIF_F_LRO) &&
2062 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2063 e_info(probe, "rx-usecs set to %d, "
2064 "re-enabling RSC\n",
2065 ec->rx_coalesce_usecs);
2066 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2067 return true;
2068 }
2069 }
2070 return false;
2071}
2072
1988static int ixgbe_set_coalesce(struct net_device *netdev, 2073static int ixgbe_set_coalesce(struct net_device *netdev,
1989 struct ethtool_coalesce *ec) 2074 struct ethtool_coalesce *ec)
1990{ 2075{
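ixgbe_update_rsc() boils down to one rule: RSC stays enabled only while the interrupt rate implied by rx-usecs is at or below the RSC ceiling, with rx-usecs==1 (dynamic moderation) always allowed. A sketch of that decision, using an assumed value for IXGBE_MAX_RSC_INT_RATE:

#include <stdbool.h>
#include <stdio.h>

#define MAX_RSC_INT_RATE 162760   /* assumed interrupts/sec ceiling for RSC */

static bool rsc_allowed(unsigned int rx_coalesce_usecs)
{
        if (rx_coalesce_usecs == 1)            /* dynamic ITR: fine for RSC */
                return true;
        /* too small a delay means too many interrupts per second for RSC */
        return rx_coalesce_usecs > 1000000 / MAX_RSC_INT_RATE;
}

int main(void)
{
        printf("rx-usecs=1   -> RSC %s\n", rsc_allowed(1) ? "ok" : "off");
        printf("rx-usecs=3   -> RSC %s\n", rsc_allowed(3) ? "ok" : "off");
        printf("rx-usecs=100 -> RSC %s\n", rsc_allowed(100) ? "ok" : "off");
        return 0;
}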
@@ -2002,17 +2087,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2002 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; 2087 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
2003 2088
2004 if (ec->rx_coalesce_usecs > 1) { 2089 if (ec->rx_coalesce_usecs > 1) {
2005 u32 max_int;
2006 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
2007 max_int = IXGBE_MAX_RSC_INT_RATE;
2008 else
2009 max_int = IXGBE_MAX_INT_RATE;
2010
2011 /* check the limits */ 2090 /* check the limits */
2012 if ((1000000/ec->rx_coalesce_usecs > max_int) || 2091 if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
2013 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) 2092 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2014 return -EINVAL; 2093 return -EINVAL;
2015 2094
2095 /* check the old value and enable RSC if necessary */
2096 need_reset = ixgbe_update_rsc(adapter, ec);
2097
2016 /* store the value in ints/second */ 2098 /* store the value in ints/second */
2017 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; 2099 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
2018 2100
@@ -2021,32 +2103,21 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2021 /* clear the lower bit as its used for dynamic state */ 2103 /* clear the lower bit as its used for dynamic state */
2022 adapter->rx_itr_setting &= ~1; 2104 adapter->rx_itr_setting &= ~1;
2023 } else if (ec->rx_coalesce_usecs == 1) { 2105 } else if (ec->rx_coalesce_usecs == 1) {
2106 /* check the old value and enable RSC if necessary */
2107 need_reset = ixgbe_update_rsc(adapter, ec);
2108
2024 /* 1 means dynamic mode */ 2109 /* 1 means dynamic mode */
2025 adapter->rx_eitr_param = 20000; 2110 adapter->rx_eitr_param = 20000;
2026 adapter->rx_itr_setting = 1; 2111 adapter->rx_itr_setting = 1;
2027 } else { 2112 } else {
2113 /* check the old value and enable RSC if necessary */
2114 need_reset = ixgbe_update_rsc(adapter, ec);
2028 /* 2115 /*
2029 * any other value means disable eitr, which is best 2116 * any other value means disable eitr, which is best
2030 * served by setting the interrupt rate very high 2117 * served by setting the interrupt rate very high
2031 */ 2118 */
2032 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; 2119 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2033 adapter->rx_itr_setting = 0; 2120 adapter->rx_itr_setting = 0;
2034
2035 /*
2036 * if hardware RSC is enabled, disable it when
2037 * setting low latency mode, to avoid errata, assuming
2038 * that when the user set low latency mode they want
2039 * it at the cost of anything else
2040 */
2041 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2042 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2043 if (netdev->features & NETIF_F_LRO) {
2044 netdev->features &= ~NETIF_F_LRO;
2045 e_info(probe, "rx-usecs set to 0, "
2046 "disabling RSC\n");
2047 }
2048 need_reset = true;
2049 }
2050 } 2121 }
2051 2122
2052 if (ec->tx_coalesce_usecs > 1) { 2123 if (ec->tx_coalesce_usecs > 1) {
@@ -2133,28 +2204,39 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2133 return rc; 2204 return rc;
2134 2205
2135 /* if state changes we need to update adapter->flags and reset */ 2206 /* if state changes we need to update adapter->flags and reset */
2136 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { 2207 if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
2137 /* 2208 (!!(data & ETH_FLAG_LRO) !=
2138 * cast both to bool and verify if they are set the same 2209 !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
2139 * but only enable RSC if itr is non-zero, as 2210 if ((data & ETH_FLAG_LRO) &&
2140 * itr=0 and RSC are mutually exclusive 2211 (!adapter->rx_itr_setting ||
2141 */ 2212 (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
2142 if (((!!(data & ETH_FLAG_LRO)) != 2213 e_info(probe, "rx-usecs set too low, "
2143 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) && 2214 "not enabling RSC.\n");
2144 adapter->rx_itr_setting) { 2215 } else {
2145 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; 2216 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2146 switch (adapter->hw.mac.type) { 2217 switch (adapter->hw.mac.type) {
2147 case ixgbe_mac_82599EB: 2218 case ixgbe_mac_82599EB:
2148 need_reset = true; 2219 need_reset = true;
2149 break; 2220 break;
2221 case ixgbe_mac_X540: {
2222 int i;
2223 for (i = 0; i < adapter->num_rx_queues; i++) {
2224 struct ixgbe_ring *ring =
2225 adapter->rx_ring[i];
2226 if (adapter->flags2 &
2227 IXGBE_FLAG2_RSC_ENABLED) {
2228 ixgbe_configure_rscctl(adapter,
2229 ring);
2230 } else {
2231 ixgbe_clear_rscctl(adapter,
2232 ring);
2233 }
2234 }
2235 }
2236 break;
2150 default: 2237 default:
2151 break; 2238 break;
2152 } 2239 }
2153 } else if (!adapter->rx_itr_setting) {
2154 netdev->features &= ~NETIF_F_LRO;
2155 if (data & ETH_FLAG_LRO)
2156 e_info(probe, "rx-usecs set to 0, "
2157 "LRO/RSC cannot be enabled.\n");
2158 } 2240 }
2159 } 2241 }
2160 2242
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 05efa6a8ce8..6342d485979 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -68,7 +68,7 @@ static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
68static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) 68static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
69{ 69{
70 ddp->len = 0; 70 ddp->len = 0;
71 ddp->err = 0; 71 ddp->err = 1;
72 ddp->udl = NULL; 72 ddp->udl = NULL;
73 ddp->udp = 0UL; 73 ddp->udp = 0UL;
74 ddp->sgl = NULL; 74 ddp->sgl = NULL;
@@ -92,6 +92,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
92 struct ixgbe_fcoe *fcoe; 92 struct ixgbe_fcoe *fcoe;
93 struct ixgbe_adapter *adapter; 93 struct ixgbe_adapter *adapter;
94 struct ixgbe_fcoe_ddp *ddp; 94 struct ixgbe_fcoe_ddp *ddp;
95 u32 fcbuff;
95 96
96 if (!netdev) 97 if (!netdev)
97 goto out_ddp_put; 98 goto out_ddp_put;
@@ -115,7 +116,14 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
115 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); 116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, 117 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
117 (xid | IXGBE_FCDMARW_WE)); 118 (xid | IXGBE_FCDMARW_WE));
119
120 /* guaranteed to be invalidated after 100us */
121 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
122 (xid | IXGBE_FCDMARW_RE));
123 fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
118 spin_unlock_bh(&fcoe->lock); 124 spin_unlock_bh(&fcoe->lock);
125 if (fcbuff & IXGBE_FCBUFF_VALID)
126 udelay(100);
119 } 127 }
120 if (ddp->sgl) 128 if (ddp->sgl)
121 pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, 129 pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
@@ -168,6 +176,11 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
168 return 0; 176 return 0;
169 } 177 }
170 178
179 /* no DDP if we are already down or resetting */
180 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
181 test_bit(__IXGBE_RESETTING, &adapter->state))
182 return 0;
183
171 fcoe = &adapter->fcoe; 184 fcoe = &adapter->fcoe;
172 if (!fcoe->pool) { 185 if (!fcoe->pool) {
173 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); 186 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
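The ixgbe_fcoe_ddp_put() change above pairs the write-invalidate command with a read-back of the context's valid flag before deciding whether the 100us settle delay is needed. A rough userspace model of that write/read-back pattern (the register names and bits below are stand-ins, not the real FCBUFF/FCDMARW layout):

#include <stdint.h>
#include <stdio.h>

#define DMARW_WE   0x4000u   /* assumed: write-enable (invalidate) command bit */
#define DMARW_RE   0x8000u   /* assumed: read-back command bit */
#define BUFF_VALID 0x0001u   /* assumed: context-still-valid flag */

struct fake_hw {
        uint32_t fcbuff;
        uint32_t fcdmarw;
};

static void reg_write(struct fake_hw *hw, uint32_t *reg, uint32_t val)
{
        *reg = val;
        /* model the hardware tearing down the context after a WE command */
        if (reg == &hw->fcdmarw && (val & DMARW_WE))
                hw->fcbuff &= ~BUFF_VALID;
}

static void ddp_invalidate(struct fake_hw *hw, uint16_t xid)
{
        reg_write(hw, &hw->fcbuff, 0);
        reg_write(hw, &hw->fcdmarw, xid | DMARW_WE);

        /* read the context state back before assuming it is gone */
        reg_write(hw, &hw->fcdmarw, xid | DMARW_RE);
        if (hw->fcbuff & BUFF_VALID)
                printf("xid 0x%x still valid, wait ~100us\n", xid);
        else
                printf("xid 0x%x invalidated\n", xid);
}

int main(void)
{
        struct fake_hw hw = { .fcbuff = BUFF_VALID };

        ddp_invalidate(&hw, 0x10);
        return 0;
}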
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index eee0b298bd3..fdb35d040d2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,13 +52,14 @@ char ixgbe_driver_name[] = "ixgbe";
52static const char ixgbe_driver_string[] = 52static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver"; 53 "Intel(R) 10 Gigabit PCI Express Network Driver";
54 54
55#define DRV_VERSION "2.0.84-k2" 55#define DRV_VERSION "3.0.12-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
58 58
59static const struct ixgbe_info *ixgbe_info_tbl[] = { 59static const struct ixgbe_info *ixgbe_info_tbl[] = {
60 [board_82598] = &ixgbe_82598_info, 60 [board_82598] = &ixgbe_82598_info,
61 [board_82599] = &ixgbe_82599_info, 61 [board_82599] = &ixgbe_82599_info,
62 [board_X540] = &ixgbe_X540_info,
62}; 63};
63 64
64/* ixgbe_pci_tbl - PCI Device ID Table 65/* ixgbe_pci_tbl - PCI Device ID Table
@@ -108,10 +109,16 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
108 board_82599 }, 109 board_82599 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
110 board_82599 }, 111 board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
113 board_82599 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
115 board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), 116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
112 board_82599 }, 117 board_82599 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
114 board_82599 }, 119 board_82599 },
120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
121 board_82599 },
115 122
116 /* required last entry */ 123 /* required last entry */
117 {0, } 124 {0, }
@@ -560,6 +567,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
560 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 567 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
561 break; 568 break;
562 case ixgbe_mac_82599EB: 569 case ixgbe_mac_82599EB:
570 case ixgbe_mac_X540:
563 if (direction == -1) { 571 if (direction == -1) {
564 /* other causes */ 572 /* other causes */
565 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 573 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -589,29 +597,34 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
589{ 597{
590 u32 mask; 598 u32 mask;
591 599
592 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 600 switch (adapter->hw.mac.type) {
601 case ixgbe_mac_82598EB:
593 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 602 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
594 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 603 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
595 } else { 604 break;
605 case ixgbe_mac_82599EB:
606 case ixgbe_mac_X540:
596 mask = (qmask & 0xFFFFFFFF); 607 mask = (qmask & 0xFFFFFFFF);
597 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); 608 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
598 mask = (qmask >> 32); 609 mask = (qmask >> 32);
599 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); 610 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
611 break;
612 default:
613 break;
600 } 614 }
601} 615}
602 616
603void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 617void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
604 struct ixgbe_tx_buffer 618 struct ixgbe_tx_buffer *tx_buffer_info)
605 *tx_buffer_info)
606{ 619{
607 if (tx_buffer_info->dma) { 620 if (tx_buffer_info->dma) {
608 if (tx_buffer_info->mapped_as_page) 621 if (tx_buffer_info->mapped_as_page)
609 dma_unmap_page(&adapter->pdev->dev, 622 dma_unmap_page(tx_ring->dev,
610 tx_buffer_info->dma, 623 tx_buffer_info->dma,
611 tx_buffer_info->length, 624 tx_buffer_info->length,
612 DMA_TO_DEVICE); 625 DMA_TO_DEVICE);
613 else 626 else
614 dma_unmap_single(&adapter->pdev->dev, 627 dma_unmap_single(tx_ring->dev,
615 tx_buffer_info->dma, 628 tx_buffer_info->dma,
616 tx_buffer_info->length, 629 tx_buffer_info->length,
617 DMA_TO_DEVICE); 630 DMA_TO_DEVICE);
@@ -626,92 +639,166 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
626} 639}
627 640
628/** 641/**
629 * ixgbe_tx_xon_state - check the tx ring xon state 642 * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
630 * @adapter: the ixgbe adapter 643 * @adapter: driver private struct
631 * @tx_ring: the corresponding tx_ring 644 * @index: reg idx of queue to query (0-127)
632 * 645 *
633 * 646 * Helper function to determine the traffic index for a particular
634 * corresponding TC of this tx_ring when checking TFCS. 647 * register index.
635 * 648 *
636 * Returns : true if in xon state (currently not paused) 649 * Returns : a tc index for use in range 0-7, or 0-3
637 */ 650 */
638static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, 651u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
639 struct ixgbe_ring *tx_ring)
640{ 652{
641 u32 txoff = IXGBE_TFCS_TXOFF; 653 int tc = -1;
654 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
642 655
643#ifdef CONFIG_IXGBE_DCB 656 /* if DCB is not enabled the queues have no TC */
644 if (adapter->dcb_cfg.pfc_mode_enable) { 657 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
645 int tc; 658 return tc;
646 int reg_idx = tx_ring->reg_idx; 659
647 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 660 /* check valid range */
661 if (reg_idx >= adapter->hw.mac.max_tx_queues)
662 return tc;
648 663
649 switch (adapter->hw.mac.type) { 664 switch (adapter->hw.mac.type) {
665 case ixgbe_mac_82598EB:
666 tc = reg_idx >> 2;
667 break;
668 default:
669 if (dcb_i != 4 && dcb_i != 8)
670 break;
671
672 /* if VMDq is enabled the lowest order bits determine TC */
673 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
674 IXGBE_FLAG_VMDQ_ENABLED)) {
675 tc = reg_idx & (dcb_i - 1);
676 break;
677 }
678
679 /*
680 * Convert the reg_idx into the correct TC. This bitmask
681 * targets the last full 32 ring traffic class and assigns
682 * it a value of 1. From there the rest of the rings are
683 * based on shifting the mask further up to include the
684 * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
685 * will only ever be 8 or 4 and that reg_idx will never
686 * be greater than 128. The code without the power of 2
687 * optimizations would be:
688 * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
689 */
690 tc = ((reg_idx & 0X1F) + 0x20) * dcb_i;
691 tc >>= 9 - (reg_idx >> 5);
692 }
693
694 return tc;
695}
696
697static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
698{
699 struct ixgbe_hw *hw = &adapter->hw;
700 struct ixgbe_hw_stats *hwstats = &adapter->stats;
701 u32 data = 0;
702 u32 xoff[8] = {0};
703 int i;
704
705 if ((hw->fc.current_mode == ixgbe_fc_full) ||
706 (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
707 switch (hw->mac.type) {
650 case ixgbe_mac_82598EB: 708 case ixgbe_mac_82598EB:
651 tc = reg_idx >> 2; 709 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
652 txoff = IXGBE_TFCS_TXOFF0;
653 break; 710 break;
654 case ixgbe_mac_82599EB: 711 default:
655 tc = 0; 712 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
656 txoff = IXGBE_TFCS_TXOFF; 713 }
657 if (dcb_i == 8) { 714 hwstats->lxoffrxc += data;
658 /* TC0, TC1 */ 715
659 tc = reg_idx >> 5; 716 /* refill credits (no tx hang) if we received xoff */
660 if (tc == 2) /* TC2, TC3 */ 717 if (!data)
661 tc += (reg_idx - 64) >> 4; 718 return;
662 else if (tc == 3) /* TC4, TC5, TC6, TC7 */ 719
663 tc += 1 + ((reg_idx - 96) >> 3); 720 for (i = 0; i < adapter->num_tx_queues; i++)
664 } else if (dcb_i == 4) { 721 clear_bit(__IXGBE_HANG_CHECK_ARMED,
665 /* TC0, TC1 */ 722 &adapter->tx_ring[i]->state);
666 tc = reg_idx >> 6; 723 return;
667 if (tc == 1) { 724 } else if (!(adapter->dcb_cfg.pfc_mode_enable))
668 tc += (reg_idx - 64) >> 5; 725 return;
669 if (tc == 2) /* TC2, TC3 */ 726
670 tc += (reg_idx - 96) >> 4; 727 /* update stats for each tc, only valid with PFC enabled */
671 } 728 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
672 } 729 switch (hw->mac.type) {
730 case ixgbe_mac_82598EB:
731 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
673 break; 732 break;
674 default: 733 default:
675 tc = 0; 734 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
676 } 735 }
677 txoff <<= tc; 736 hwstats->pxoffrxc[i] += xoff[i];
678 } 737 }
679#endif 738
680 return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff; 739 /* disarm tx queues that have received xoff frames */
740 for (i = 0; i < adapter->num_tx_queues; i++) {
741 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
742 u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
743
744 if (xoff[tc])
745 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
746 }
747}
748
749static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
750{
751 return ring->tx_stats.completed;
681} 752}
682 753
683static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, 754static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
684 struct ixgbe_ring *tx_ring,
685 unsigned int eop)
686{ 755{
756 struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
687 struct ixgbe_hw *hw = &adapter->hw; 757 struct ixgbe_hw *hw = &adapter->hw;
688 758
689 /* Detect a transmit hang in hardware, this serializes the 759 u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
690 * check with the clearing of time_stamp and movement of eop */ 760 u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
691 adapter->detect_tx_hung = false; 761
692 if (tx_ring->tx_buffer_info[eop].time_stamp && 762 if (head != tail)
693 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && 763 return (head < tail) ?
694 ixgbe_tx_xon_state(adapter, tx_ring)) { 764 tail - head : (tail + ring->count - head);
695 /* detected Tx unit hang */ 765
696 union ixgbe_adv_tx_desc *tx_desc; 766 return 0;
697 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); 767}
698 e_err(drv, "Detected Tx Unit Hang\n" 768
699 " Tx Queue <%d>\n" 769static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
700 " TDH, TDT <%x>, <%x>\n" 770{
701 " next_to_use <%x>\n" 771 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
702 " next_to_clean <%x>\n" 772 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
703 "tx_buffer_info[next_to_clean]\n" 773 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
704 " time_stamp <%lx>\n" 774 bool ret = false;
705 " jiffies <%lx>\n", 775
706 tx_ring->queue_index, 776 clear_check_for_tx_hang(tx_ring);
707 IXGBE_READ_REG(hw, tx_ring->head), 777
708 IXGBE_READ_REG(hw, tx_ring->tail), 778 /*
709 tx_ring->next_to_use, eop, 779 * Check for a hung queue, but be thorough. This verifies
710 tx_ring->tx_buffer_info[eop].time_stamp, jiffies); 780 * that a transmit has been completed since the previous
711 return true; 781 * check AND there is at least one packet pending. The
782 * ARMED bit is set to indicate a potential hang. The
783 * bit is cleared if a pause frame is received to remove
784 * false hang detection due to PFC or 802.3x frames. By
785 * requiring this to fail twice we avoid races with
786 * pfc clearing the ARMED bit and conditions where we
787 * run the check_tx_hang logic with a transmit completion
788 * pending but without time to complete it yet.
789 */
790 if ((tx_done_old == tx_done) && tx_pending) {
791 /* make sure it is true for two checks in a row */
792 ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
793 &tx_ring->state);
794 } else {
795 /* update completed stats and continue */
796 tx_ring->tx_stats.tx_done_old = tx_done;
797 /* reset the countdown */
798 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
712 } 799 }
713 800
714 return false; 801 return ret;
715} 802}
716 803
717#define IXGBE_MAX_TXD_PWR 14 804#define IXGBE_MAX_TXD_PWR 14
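
ixgbe_dcb_txq_to_tc(), added in the hunk above, replaces the per-MAC if/else ladder that ixgbe_tx_xon_state() used to carry. The shifted form tc = ((reg_idx & 0x1f) + 0x20) * dcb_i >> (9 - (reg_idx >> 5)) is the comment's plain expression with the modulo, divide and constants turned into masks and shifts. A standalone sketch that checks the two forms against each other and prints the resulting queue-to-TC boundaries (arithmetic only; the driver's 82598 and VMDq special cases are left out):

#include <stdio.h>

/* plain form from the comment in ixgbe_dcb_txq_to_tc() */
static int txq_to_tc_plain(int reg_idx, int dcb_i)
{
        return (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32);
}

/* power-of-two form actually used by the driver */
static int txq_to_tc_fast(int reg_idx, int dcb_i)
{
        int tc = ((reg_idx & 0x1f) + 0x20) * dcb_i;

        return tc >> (9 - (reg_idx >> 5));
}

int main(void)
{
        int dcb_i, reg_idx, prev;

        for (dcb_i = 4; dcb_i <= 8; dcb_i += 4) {
                printf("%d traffic classes:\n", dcb_i);
                prev = -1;
                for (reg_idx = 0; reg_idx < 128; reg_idx++) {
                        int tc = txq_to_tc_fast(reg_idx, dcb_i);

                        if (tc != txq_to_tc_plain(reg_idx, dcb_i))
                                printf("  mismatch at reg_idx %d\n", reg_idx);
                        if (tc != prev) {
                                printf("  TC%d starts at tx queue %d\n", tc, reg_idx);
                                prev = tc;
                        }
                }
        }
        return 0;
}

With 8 traffic classes this reproduces the 32/32/16/16/8/8/8/8 split of the 128 queues; with 4 it gives 64/32/16/16, which is what the removed ladder in ixgbe_tx_xon_state() computed case by case.
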
@@ -734,11 +821,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
734 struct ixgbe_ring *tx_ring) 821 struct ixgbe_ring *tx_ring)
735{ 822{
736 struct ixgbe_adapter *adapter = q_vector->adapter; 823 struct ixgbe_adapter *adapter = q_vector->adapter;
737 struct net_device *netdev = adapter->netdev;
738 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 824 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
739 struct ixgbe_tx_buffer *tx_buffer_info; 825 struct ixgbe_tx_buffer *tx_buffer_info;
740 unsigned int i, eop, count = 0;
741 unsigned int total_bytes = 0, total_packets = 0; 826 unsigned int total_bytes = 0, total_packets = 0;
827 u16 i, eop, count = 0;
742 828
743 i = tx_ring->next_to_clean; 829 i = tx_ring->next_to_clean;
744 eop = tx_ring->tx_buffer_info[i].next_to_watch; 830 eop = tx_ring->tx_buffer_info[i].next_to_watch;
@@ -749,148 +835,182 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
749 bool cleaned = false; 835 bool cleaned = false;
750 rmb(); /* read buffer_info after eop_desc */ 836 rmb(); /* read buffer_info after eop_desc */
751 for ( ; !cleaned; count++) { 837 for ( ; !cleaned; count++) {
752 struct sk_buff *skb;
753 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); 838 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
754 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 839 tx_buffer_info = &tx_ring->tx_buffer_info[i];
755 cleaned = (i == eop);
756 skb = tx_buffer_info->skb;
757
758 if (cleaned && skb) {
759 unsigned int segs, bytecount;
760 unsigned int hlen = skb_headlen(skb);
761
762 /* gso_segs is currently only valid for tcp */
763 segs = skb_shinfo(skb)->gso_segs ?: 1;
764#ifdef IXGBE_FCOE
765 /* adjust for FCoE Sequence Offload */
766 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
767 && skb_is_gso(skb)
768 && vlan_get_protocol(skb) ==
769 htons(ETH_P_FCOE)) {
770 hlen = skb_transport_offset(skb) +
771 sizeof(struct fc_frame_header) +
772 sizeof(struct fcoe_crc_eof);
773 segs = DIV_ROUND_UP(skb->len - hlen,
774 skb_shinfo(skb)->gso_size);
775 }
776#endif /* IXGBE_FCOE */
777 /* multiply data chunks by size of headers */
778 bytecount = ((segs - 1) * hlen) + skb->len;
779 total_packets += segs;
780 total_bytes += bytecount;
781 }
782
783 ixgbe_unmap_and_free_tx_resource(adapter,
784 tx_buffer_info);
785 840
786 tx_desc->wb.status = 0; 841 tx_desc->wb.status = 0;
842 cleaned = (i == eop);
787 843
788 i++; 844 i++;
789 if (i == tx_ring->count) 845 if (i == tx_ring->count)
790 i = 0; 846 i = 0;
847
848 if (cleaned && tx_buffer_info->skb) {
849 total_bytes += tx_buffer_info->bytecount;
850 total_packets += tx_buffer_info->gso_segs;
851 }
852
853 ixgbe_unmap_and_free_tx_resource(tx_ring,
854 tx_buffer_info);
791 } 855 }
792 856
857 tx_ring->tx_stats.completed++;
793 eop = tx_ring->tx_buffer_info[i].next_to_watch; 858 eop = tx_ring->tx_buffer_info[i].next_to_watch;
794 eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); 859 eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
795 } 860 }
796 861
797 tx_ring->next_to_clean = i; 862 tx_ring->next_to_clean = i;
863 tx_ring->total_bytes += total_bytes;
864 tx_ring->total_packets += total_packets;
865 u64_stats_update_begin(&tx_ring->syncp);
866 tx_ring->stats.packets += total_packets;
867 tx_ring->stats.bytes += total_bytes;
868 u64_stats_update_end(&tx_ring->syncp);
869
870 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
871 /* schedule immediate reset if we believe we hung */
872 struct ixgbe_hw *hw = &adapter->hw;
873 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
874 e_err(drv, "Detected Tx Unit Hang\n"
875 " Tx Queue <%d>\n"
876 " TDH, TDT <%x>, <%x>\n"
877 " next_to_use <%x>\n"
878 " next_to_clean <%x>\n"
879 "tx_buffer_info[next_to_clean]\n"
880 " time_stamp <%lx>\n"
881 " jiffies <%lx>\n",
882 tx_ring->queue_index,
883 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
884 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
885 tx_ring->next_to_use, eop,
886 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
887
888 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
889
890 e_info(probe,
891 "tx hang %d detected on queue %d, resetting adapter\n",
892 adapter->tx_timeout_count + 1, tx_ring->queue_index);
893
894 /* schedule immediate reset if we believe we hung */
895 ixgbe_tx_timeout(adapter->netdev);
896
897 /* the adapter is about to reset, no point in enabling stuff */
898 return true;
899 }
798 900
799#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 901#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
800 if (unlikely(count && netif_carrier_ok(netdev) && 902 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
801 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 903 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
802 /* Make sure that anybody stopping the queue after this 904 /* Make sure that anybody stopping the queue after this
803 * sees the new next_to_clean. 905 * sees the new next_to_clean.
804 */ 906 */
805 smp_mb(); 907 smp_mb();
806 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 908 if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
807 !test_bit(__IXGBE_DOWN, &adapter->state)) { 909 !test_bit(__IXGBE_DOWN, &adapter->state)) {
808 netif_wake_subqueue(netdev, tx_ring->queue_index); 910 netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
809 ++tx_ring->restart_queue; 911 ++tx_ring->tx_stats.restart_queue;
810 }
811 }
812
813 if (adapter->detect_tx_hung) {
814 if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
815 /* schedule immediate reset if we believe we hung */
816 e_info(probe, "tx hang %d detected, resetting "
817 "adapter\n", adapter->tx_timeout_count + 1);
818 ixgbe_tx_timeout(adapter->netdev);
819 } 912 }
820 } 913 }
821 914
822 /* re-arm the interrupt */
823 if (count >= tx_ring->work_limit)
824 ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
825
826 tx_ring->total_bytes += total_bytes;
827 tx_ring->total_packets += total_packets;
828 u64_stats_update_begin(&tx_ring->syncp);
829 tx_ring->stats.packets += total_packets;
830 tx_ring->stats.bytes += total_bytes;
831 u64_stats_update_end(&tx_ring->syncp);
832 return count < tx_ring->work_limit; 915 return count < tx_ring->work_limit;
833} 916}
834 917
835#ifdef CONFIG_IXGBE_DCA 918#ifdef CONFIG_IXGBE_DCA
836static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, 919static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
837 struct ixgbe_ring *rx_ring) 920 struct ixgbe_ring *rx_ring,
921 int cpu)
838{ 922{
923 struct ixgbe_hw *hw = &adapter->hw;
839 u32 rxctrl; 924 u32 rxctrl;
840 int cpu = get_cpu(); 925 u8 reg_idx = rx_ring->reg_idx;
841 int q = rx_ring->reg_idx; 926
842 927 rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
843 if (rx_ring->cpu != cpu) { 928 switch (hw->mac.type) {
844 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); 929 case ixgbe_mac_82598EB:
845 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 930 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
846 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; 931 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
847 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 932 break;
848 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 933 case ixgbe_mac_82599EB:
849 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; 934 case ixgbe_mac_X540:
850 rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << 935 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
851 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); 936 rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
852 } 937 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
853 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 938 break;
854 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 939 default:
855 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); 940 break;
856 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
857 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
858 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
859 rx_ring->cpu = cpu;
860 } 941 }
861 put_cpu(); 942 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
943 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
944 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
945 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
946 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
947 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
862} 948}
863 949
864static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, 950static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
865 struct ixgbe_ring *tx_ring) 951 struct ixgbe_ring *tx_ring,
952 int cpu)
866{ 953{
954 struct ixgbe_hw *hw = &adapter->hw;
867 u32 txctrl; 955 u32 txctrl;
956 u8 reg_idx = tx_ring->reg_idx;
957
958 switch (hw->mac.type) {
959 case ixgbe_mac_82598EB:
960 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
961 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
962 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
963 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
964 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
965 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
966 break;
967 case ixgbe_mac_82599EB:
968 case ixgbe_mac_X540:
969 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
970 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
971 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
972 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
973 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
974 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
975 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
976 break;
977 default:
978 break;
979 }
980}
981
982static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
983{
984 struct ixgbe_adapter *adapter = q_vector->adapter;
868 int cpu = get_cpu(); 985 int cpu = get_cpu();
869 int q = tx_ring->reg_idx; 986 long r_idx;
870 struct ixgbe_hw *hw = &adapter->hw; 987 int i;
871 988
872 if (tx_ring->cpu != cpu) { 989 if (q_vector->cpu == cpu)
873 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 990 goto out_no_update;
874 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q)); 991
875 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 992 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
876 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 993 for (i = 0; i < q_vector->txr_count; i++) {
877 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 994 ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
878 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl); 995 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
879 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 996 r_idx + 1);
880 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q)); 997 }
881 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; 998
882 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << 999 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
883 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); 1000 for (i = 0; i < q_vector->rxr_count; i++) {
884 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 1001 ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
885 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl); 1002 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
886 } 1003 r_idx + 1);
887 tx_ring->cpu = cpu;
888 } 1004 }
1005
1006 q_vector->cpu = cpu;
1007out_no_update:
889 put_cpu(); 1008 put_cpu();
890} 1009}
891 1010
892static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) 1011static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
893{ 1012{
1013 int num_q_vectors;
894 int i; 1014 int i;
895 1015
896 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) 1016 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
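
The hang handling in the hunk above now relies on the two-strikes scheme of ixgbe_check_tx_hang(): a queue must show pending descriptors with no completion progress on two consecutive checks before ixgbe_clean_tx_irq() logs the hang, stops the subqueue and schedules the reset, and a received XOFF clears the armed bit in between. A toy model of that control flow; the structure and field names below are invented for the sketch, only the logic mirrors the driver:

#include <stdbool.h>
#include <stdio.h>

struct toy_ring {
        unsigned long completed;      /* total descriptors cleaned           */
        unsigned long completed_old;  /* value seen by the previous check    */
        unsigned long pending;        /* descriptors still owned by hardware */
        bool armed;
};

static bool check_tx_hang(struct toy_ring *r)
{
        if (r->completed == r->completed_old && r->pending) {
                bool was_armed = r->armed;

                r->armed = true;              /* first strike arms ...     */
                return was_armed;             /* ... second strike reports */
        }
        r->completed_old = r->completed;      /* progress: reset the countdown */
        r->armed = false;
        return false;
}

int main(void)
{
        struct toy_ring r = { .completed = 10, .completed_old = 10, .pending = 4 };

        printf("first check:    %s\n", check_tx_hang(&r) ? "hung" : "ok");
        printf("second check:   %s\n", check_tx_hang(&r) ? "hung" : "ok");
        r.completed++;                        /* a completion arrives */
        printf("after progress: %s\n", check_tx_hang(&r) ? "hung" : "ok");
        return 0;
}
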
@@ -899,22 +1019,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
899 /* always use CB2 mode, difference is masked in the CB driver */ 1019 /* always use CB2 mode, difference is masked in the CB driver */
900 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); 1020 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
901 1021
902 for (i = 0; i < adapter->num_tx_queues; i++) { 1022 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
903 adapter->tx_ring[i]->cpu = -1; 1023 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
904 ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]); 1024 else
905 } 1025 num_q_vectors = 1;
906 for (i = 0; i < adapter->num_rx_queues; i++) { 1026
907 adapter->rx_ring[i]->cpu = -1; 1027 for (i = 0; i < num_q_vectors; i++) {
908 ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]); 1028 adapter->q_vector[i]->cpu = -1;
1029 ixgbe_update_dca(adapter->q_vector[i]);
909 } 1030 }
910} 1031}
911 1032
912static int __ixgbe_notify_dca(struct device *dev, void *data) 1033static int __ixgbe_notify_dca(struct device *dev, void *data)
913{ 1034{
914 struct net_device *netdev = dev_get_drvdata(dev); 1035 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
915 struct ixgbe_adapter *adapter = netdev_priv(netdev);
916 unsigned long event = *(unsigned long *)data; 1036 unsigned long event = *(unsigned long *)data;
917 1037
1038 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
1039 return 0;
1040
918 switch (event) { 1041 switch (event) {
919 case DCA_PROVIDER_ADD: 1042 case DCA_PROVIDER_ADD:
920 /* if we're already enabled, don't do it again */ 1043 /* if we're already enabled, don't do it again */
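
ixgbe_setup_dca() and __ixgbe_notify_dca() above move DCA bookkeeping from per-ring to per-q_vector: each vector caches the CPU it was last programmed for, and ixgbe_update_dca() only rewrites the ring tags when the vector actually migrates. A small sketch of that caching pattern with mocked structures (vector, ring and program_dca_tag are illustrative names; the real code derives the CPU from get_cpu()/put_cpu()):

#include <stdio.h>

struct ring   { int reg_idx; };
struct vector {
        int cached_cpu;              /* -1 until first programmed */
        struct ring rings[4];
        int nr_rings;
};

static void program_dca_tag(struct ring *r, int cpu)
{
        printf("  ring %d: DCA tag -> cpu %d\n", r->reg_idx, cpu);
}

static void update_dca(struct vector *v, int cpu)
{
        int i;

        if (v->cached_cpu == cpu)    /* nothing moved, skip the MMIO */
                return;

        for (i = 0; i < v->nr_rings; i++)
                program_dca_tag(&v->rings[i], cpu);
        v->cached_cpu = cpu;
}

int main(void)
{
        struct vector v = { .cached_cpu = -1, .rings = {{0}, {1}}, .nr_rings = 2 };

        update_dca(&v, 3);   /* first run: programs both rings     */
        update_dca(&v, 3);   /* same cpu: no register writes       */
        update_dca(&v, 5);   /* migration: reprogram both rings    */
        return 0;
}
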
@@ -1013,8 +1136,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
1013 skb->ip_summed = CHECKSUM_UNNECESSARY; 1136 skb->ip_summed = CHECKSUM_UNNECESSARY;
1014} 1137}
1015 1138
1016static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, 1139static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
1017 struct ixgbe_ring *rx_ring, u32 val)
1018{ 1140{
1019 /* 1141 /*
1020 * Force memory writes to complete before letting h/w 1142 * Force memory writes to complete before letting h/w
@@ -1023,72 +1145,81 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
1023 * such as IA-64). 1145 * such as IA-64).
1024 */ 1146 */
1025 wmb(); 1147 wmb();
1026 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val); 1148 writel(val, rx_ring->tail);
1027} 1149}
1028 1150
1029/** 1151/**
1030 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split 1152 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
1031 * @adapter: address of board private structure 1153 * @rx_ring: ring to place buffers on
1154 * @cleaned_count: number of buffers to replace
1032 **/ 1155 **/
1033void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, 1156void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1034 struct ixgbe_ring *rx_ring,
1035 int cleaned_count)
1036{ 1157{
1037 struct net_device *netdev = adapter->netdev;
1038 struct pci_dev *pdev = adapter->pdev;
1039 union ixgbe_adv_rx_desc *rx_desc; 1158 union ixgbe_adv_rx_desc *rx_desc;
1040 struct ixgbe_rx_buffer *bi; 1159 struct ixgbe_rx_buffer *bi;
1041 unsigned int i; 1160 struct sk_buff *skb;
1042 unsigned int bufsz = rx_ring->rx_buf_len; 1161 u16 i = rx_ring->next_to_use;
1043 1162
1044 i = rx_ring->next_to_use; 1163 /* do nothing if no valid netdev defined */
1045 bi = &rx_ring->rx_buffer_info[i]; 1164 if (!rx_ring->netdev)
1165 return;
1046 1166
1047 while (cleaned_count--) { 1167 while (cleaned_count--) {
1048 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); 1168 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1169 bi = &rx_ring->rx_buffer_info[i];
1170 skb = bi->skb;
1049 1171
1050 if (!bi->page_dma && 1172 if (!skb) {
1051 (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { 1173 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1052 if (!bi->page) { 1174 rx_ring->rx_buf_len);
1053 bi->page = netdev_alloc_page(netdev);
1054 if (!bi->page) {
1055 adapter->alloc_rx_page_failed++;
1056 goto no_buffers;
1057 }
1058 bi->page_offset = 0;
1059 } else {
1060 /* use a half page if we're re-using */
1061 bi->page_offset ^= (PAGE_SIZE / 2);
1062 }
1063
1064 bi->page_dma = dma_map_page(&pdev->dev, bi->page,
1065 bi->page_offset,
1066 (PAGE_SIZE / 2),
1067 DMA_FROM_DEVICE);
1068 }
1069
1070 if (!bi->skb) {
1071 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
1072 bufsz);
1073 bi->skb = skb;
1074
1075 if (!skb) { 1175 if (!skb) {
1076 adapter->alloc_rx_buff_failed++; 1176 rx_ring->rx_stats.alloc_rx_buff_failed++;
1077 goto no_buffers; 1177 goto no_buffers;
1078 } 1178 }
1079 /* initialize queue mapping */ 1179 /* initialize queue mapping */
1080 skb_record_rx_queue(skb, rx_ring->queue_index); 1180 skb_record_rx_queue(skb, rx_ring->queue_index);
1181 bi->skb = skb;
1081 } 1182 }
1082 1183
1083 if (!bi->dma) { 1184 if (!bi->dma) {
1084 bi->dma = dma_map_single(&pdev->dev, 1185 bi->dma = dma_map_single(rx_ring->dev,
1085 bi->skb->data, 1186 skb->data,
1086 rx_ring->rx_buf_len, 1187 rx_ring->rx_buf_len,
1087 DMA_FROM_DEVICE); 1188 DMA_FROM_DEVICE);
1189 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1190 rx_ring->rx_stats.alloc_rx_buff_failed++;
1191 bi->dma = 0;
1192 goto no_buffers;
1193 }
1088 } 1194 }
1089 /* Refresh the desc even if buffer_addrs didn't change because 1195
1090 * each write-back erases this info. */ 1196 if (ring_is_ps_enabled(rx_ring)) {
1091 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 1197 if (!bi->page) {
1198 bi->page = netdev_alloc_page(rx_ring->netdev);
1199 if (!bi->page) {
1200 rx_ring->rx_stats.alloc_rx_page_failed++;
1201 goto no_buffers;
1202 }
1203 }
1204
1205 if (!bi->page_dma) {
1206 /* use a half page if we're re-using */
1207 bi->page_offset ^= PAGE_SIZE / 2;
1208 bi->page_dma = dma_map_page(rx_ring->dev,
1209 bi->page,
1210 bi->page_offset,
1211 PAGE_SIZE / 2,
1212 DMA_FROM_DEVICE);
1213 if (dma_mapping_error(rx_ring->dev,
1214 bi->page_dma)) {
1215 rx_ring->rx_stats.alloc_rx_page_failed++;
1216 bi->page_dma = 0;
1217 goto no_buffers;
1218 }
1219 }
1220
1221 /* Refresh the desc even if buffer_addrs didn't change
1222 * because each write-back erases this info. */
1092 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); 1223 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1093 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); 1224 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1094 } else { 1225 } else {
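
One detail of the reworked ixgbe_alloc_rx_buffers() above is how packet-split mode reuses pages: bi->page_offset ^= PAGE_SIZE / 2 flips between the two halves of the page whenever the upper buffer has to be remapped, so a single page backs two receive buffers in turn. A trivial demonstration of that toggle (4096-byte pages assumed for the printout):

#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
        unsigned int page_offset = 0;
        int i;

        for (i = 0; i < 4; i++) {
                /* same operation as in ixgbe_alloc_rx_buffers() */
                page_offset ^= PAGE_SIZE / 2;
                printf("refill %d maps half page at offset %u\n", i, page_offset);
        }
        return 0;
}
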
@@ -1099,56 +1230,48 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
1099 i++; 1230 i++;
1100 if (i == rx_ring->count) 1231 if (i == rx_ring->count)
1101 i = 0; 1232 i = 0;
1102 bi = &rx_ring->rx_buffer_info[i];
1103 } 1233 }
1104 1234
1105no_buffers: 1235no_buffers:
1106 if (rx_ring->next_to_use != i) { 1236 if (rx_ring->next_to_use != i) {
1107 rx_ring->next_to_use = i; 1237 rx_ring->next_to_use = i;
1108 if (i-- == 0) 1238 ixgbe_release_rx_desc(rx_ring, i);
1109 i = (rx_ring->count - 1);
1110
1111 ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
1112 } 1239 }
1113} 1240}
1114 1241
1115static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) 1242static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
1116{ 1243{
1117 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; 1244 /* HW will not DMA in data larger than the given buffer, even if it
1118} 1245 * parses the (NFS, of course) header to be larger. In that case, it
1119 1246 * fills the header buffer and spills the rest into the page.
1120static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) 1247 */
1121{ 1248 u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
1122 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 1249 u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
1123} 1250 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
1124 1251 if (hlen > IXGBE_RX_HDR_SIZE)
1125static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) 1252 hlen = IXGBE_RX_HDR_SIZE;
1126{ 1253 return hlen;
1127 return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
1128 IXGBE_RXDADV_RSCCNT_MASK) >>
1129 IXGBE_RXDADV_RSCCNT_SHIFT;
1130} 1254}
1131 1255
1132/** 1256/**
1133 * ixgbe_transform_rsc_queue - change rsc queue into a full packet 1257 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
1134 * @skb: pointer to the last skb in the rsc queue 1258 * @skb: pointer to the last skb in the rsc queue
1135 * @count: pointer to number of packets coalesced in this context
1136 * 1259 *
1137 * This function changes a queue full of hw rsc buffers into a completed 1260 * This function changes a queue full of hw rsc buffers into a completed
1138 * packet. It uses the ->prev pointers to find the first packet and then 1261 * packet. It uses the ->prev pointers to find the first packet and then
1139 * turns it into the frag list owner. 1262 * turns it into the frag list owner.
1140 **/ 1263 **/
1141static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, 1264static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
1142 u64 *count)
1143{ 1265{
1144 unsigned int frag_list_size = 0; 1266 unsigned int frag_list_size = 0;
1267 unsigned int skb_cnt = 1;
1145 1268
1146 while (skb->prev) { 1269 while (skb->prev) {
1147 struct sk_buff *prev = skb->prev; 1270 struct sk_buff *prev = skb->prev;
1148 frag_list_size += skb->len; 1271 frag_list_size += skb->len;
1149 skb->prev = NULL; 1272 skb->prev = NULL;
1150 skb = prev; 1273 skb = prev;
1151 *count += 1; 1274 skb_cnt++;
1152 } 1275 }
1153 1276
1154 skb_shinfo(skb)->frag_list = skb->next; 1277 skb_shinfo(skb)->frag_list = skb->next;
@@ -1156,68 +1279,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
1156 skb->len += frag_list_size; 1279 skb->len += frag_list_size;
1157 skb->data_len += frag_list_size; 1280 skb->data_len += frag_list_size;
1158 skb->truesize += frag_list_size; 1281 skb->truesize += frag_list_size;
1282 IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
1283
1159 return skb; 1284 return skb;
1160} 1285}
1161 1286
1162struct ixgbe_rsc_cb { 1287static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
1163 dma_addr_t dma; 1288{
1164 bool delay_unmap; 1289 return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
1165}; 1290 IXGBE_RXDADV_RSCCNT_MASK);
1166 1291}
1167#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
1168 1292
1169static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, 1293static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1170 struct ixgbe_ring *rx_ring, 1294 struct ixgbe_ring *rx_ring,
1171 int *work_done, int work_to_do) 1295 int *work_done, int work_to_do)
1172{ 1296{
1173 struct ixgbe_adapter *adapter = q_vector->adapter; 1297 struct ixgbe_adapter *adapter = q_vector->adapter;
1174 struct pci_dev *pdev = adapter->pdev;
1175 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 1298 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
1176 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 1299 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
1177 struct sk_buff *skb; 1300 struct sk_buff *skb;
1178 unsigned int i, rsc_count = 0;
1179 u32 len, staterr;
1180 u16 hdr_info;
1181 bool cleaned = false;
1182 int cleaned_count = 0;
1183 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1301 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1302 const int current_node = numa_node_id();
1184#ifdef IXGBE_FCOE 1303#ifdef IXGBE_FCOE
1185 int ddp_bytes = 0; 1304 int ddp_bytes = 0;
1186#endif /* IXGBE_FCOE */ 1305#endif /* IXGBE_FCOE */
1306 u32 staterr;
1307 u16 i;
1308 u16 cleaned_count = 0;
1309 bool pkt_is_rsc = false;
1187 1310
1188 i = rx_ring->next_to_clean; 1311 i = rx_ring->next_to_clean;
1189 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); 1312 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1190 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1313 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1191 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1192 1314
1193 while (staterr & IXGBE_RXD_STAT_DD) { 1315 while (staterr & IXGBE_RXD_STAT_DD) {
1194 u32 upper_len = 0; 1316 u32 upper_len = 0;
1195 if (*work_done >= work_to_do)
1196 break;
1197 (*work_done)++;
1198 1317
1199 rmb(); /* read descriptor and rx_buffer_info after status DD */ 1318 rmb(); /* read descriptor and rx_buffer_info after status DD */
1200 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1201 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
1202 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
1203 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
1204 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1205 if ((len > IXGBE_RX_HDR_SIZE) ||
1206 (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
1207 len = IXGBE_RX_HDR_SIZE;
1208 } else {
1209 len = le16_to_cpu(rx_desc->wb.upper.length);
1210 }
1211 1319
1212 cleaned = true; 1320 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1321
1213 skb = rx_buffer_info->skb; 1322 skb = rx_buffer_info->skb;
1214 prefetch(skb->data);
1215 rx_buffer_info->skb = NULL; 1323 rx_buffer_info->skb = NULL;
1324 prefetch(skb->data);
1216 1325
1326 if (ring_is_rsc_enabled(rx_ring))
1327 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
1328
1329 /* if this is a skb from previous receive DMA will be 0 */
1217 if (rx_buffer_info->dma) { 1330 if (rx_buffer_info->dma) {
1218 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 1331 u16 hlen;
1219 (!(staterr & IXGBE_RXD_STAT_EOP)) && 1332 if (pkt_is_rsc &&
1220 (!(skb->prev))) { 1333 !(staterr & IXGBE_RXD_STAT_EOP) &&
1334 !skb->prev) {
1221 /* 1335 /*
1222 * When HWRSC is enabled, delay unmapping 1336 * When HWRSC is enabled, delay unmapping
1223 * of the first packet. It carries the 1337 * of the first packet. It carries the
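
ixgbe_transform_rsc_queue(), kept above but now tallying skb_cnt for the statistics rework, folds a hardware RSC chain into one packet: completions are linked newest-first through ->prev, so the helper walks back to the first buffer, detaches the links and adds the followers' lengths to the head. A toy version of that walk; buf and fold_rsc_chain are made up for the sketch, while the real code splices the extra sk_buffs onto skb_shinfo(skb)->frag_list:

#include <stddef.h>
#include <stdio.h>

struct buf {
        int len;
        struct buf *prev;          /* filled in as later segments arrive */
};

static struct buf *fold_rsc_chain(struct buf *last, int *nr_segs)
{
        int extra = 0;

        *nr_segs = 1;
        while (last->prev) {
                struct buf *prev = last->prev;

                extra += last->len;
                last->prev = NULL;
                last = prev;
                (*nr_segs)++;
        }
        last->len += extra;        /* head now accounts for the whole burst */
        return last;
}

int main(void)
{
        struct buf a = { 1448, NULL }, b = { 1448, &a }, c = { 600, &b };
        int segs;
        struct buf *head = fold_rsc_chain(&c, &segs);

        printf("head length %d bytes from %d coalesced segments\n",
               head->len, segs);
        return 0;
}

The skb_cnt recorded in IXGBE_RSC_CB(skb) is what the cleanup path above adds to rsc_count when packet split is off, replacing the old single increment per flush.
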
@@ -1228,29 +1342,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1228 IXGBE_RSC_CB(skb)->delay_unmap = true; 1342 IXGBE_RSC_CB(skb)->delay_unmap = true;
1229 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; 1343 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
1230 } else { 1344 } else {
1231 dma_unmap_single(&pdev->dev, 1345 dma_unmap_single(rx_ring->dev,
1232 rx_buffer_info->dma, 1346 rx_buffer_info->dma,
1233 rx_ring->rx_buf_len, 1347 rx_ring->rx_buf_len,
1234 DMA_FROM_DEVICE); 1348 DMA_FROM_DEVICE);
1235 } 1349 }
1236 rx_buffer_info->dma = 0; 1350 rx_buffer_info->dma = 0;
1237 skb_put(skb, len); 1351
1352 if (ring_is_ps_enabled(rx_ring)) {
1353 hlen = ixgbe_get_hlen(rx_desc);
1354 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1355 } else {
1356 hlen = le16_to_cpu(rx_desc->wb.upper.length);
1357 }
1358
1359 skb_put(skb, hlen);
1360 } else {
1361 /* assume packet split since header is unmapped */
1362 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1238 } 1363 }
1239 1364
1240 if (upper_len) { 1365 if (upper_len) {
1241 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, 1366 dma_unmap_page(rx_ring->dev,
1242 PAGE_SIZE / 2, DMA_FROM_DEVICE); 1367 rx_buffer_info->page_dma,
1368 PAGE_SIZE / 2,
1369 DMA_FROM_DEVICE);
1243 rx_buffer_info->page_dma = 0; 1370 rx_buffer_info->page_dma = 0;
1244 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1371 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1245 rx_buffer_info->page, 1372 rx_buffer_info->page,
1246 rx_buffer_info->page_offset, 1373 rx_buffer_info->page_offset,
1247 upper_len); 1374 upper_len);
1248 1375
1249 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || 1376 if ((page_count(rx_buffer_info->page) == 1) &&
1250 (page_count(rx_buffer_info->page) != 1)) 1377 (page_to_nid(rx_buffer_info->page) == current_node))
1251 rx_buffer_info->page = NULL;
1252 else
1253 get_page(rx_buffer_info->page); 1378 get_page(rx_buffer_info->page);
1379 else
1380 rx_buffer_info->page = NULL;
1254 1381
1255 skb->len += upper_len; 1382 skb->len += upper_len;
1256 skb->data_len += upper_len; 1383 skb->data_len += upper_len;
@@ -1265,10 +1392,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1265 prefetch(next_rxd); 1392 prefetch(next_rxd);
1266 cleaned_count++; 1393 cleaned_count++;
1267 1394
1268 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) 1395 if (pkt_is_rsc) {
1269 rsc_count = ixgbe_get_rsc_count(rx_desc);
1270
1271 if (rsc_count) {
1272 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> 1396 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1273 IXGBE_RXDADV_NEXTP_SHIFT; 1397 IXGBE_RXDADV_NEXTP_SHIFT;
1274 next_buffer = &rx_ring->rx_buffer_info[nextp]; 1398 next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1276,32 +1400,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1276 next_buffer = &rx_ring->rx_buffer_info[i]; 1400 next_buffer = &rx_ring->rx_buffer_info[i];
1277 } 1401 }
1278 1402
1279 if (staterr & IXGBE_RXD_STAT_EOP) { 1403 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
1280 if (skb->prev) 1404 if (ring_is_ps_enabled(rx_ring)) {
1281 skb = ixgbe_transform_rsc_queue(skb,
1282 &(rx_ring->rsc_count));
1283 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
1284 if (IXGBE_RSC_CB(skb)->delay_unmap) {
1285 dma_unmap_single(&pdev->dev,
1286 IXGBE_RSC_CB(skb)->dma,
1287 rx_ring->rx_buf_len,
1288 DMA_FROM_DEVICE);
1289 IXGBE_RSC_CB(skb)->dma = 0;
1290 IXGBE_RSC_CB(skb)->delay_unmap = false;
1291 }
1292 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
1293 rx_ring->rsc_count +=
1294 skb_shinfo(skb)->nr_frags;
1295 else
1296 rx_ring->rsc_count++;
1297 rx_ring->rsc_flush++;
1298 }
1299 u64_stats_update_begin(&rx_ring->syncp);
1300 rx_ring->stats.packets++;
1301 rx_ring->stats.bytes += skb->len;
1302 u64_stats_update_end(&rx_ring->syncp);
1303 } else {
1304 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1305 rx_buffer_info->skb = next_buffer->skb; 1405 rx_buffer_info->skb = next_buffer->skb;
1306 rx_buffer_info->dma = next_buffer->dma; 1406 rx_buffer_info->dma = next_buffer->dma;
1307 next_buffer->skb = skb; 1407 next_buffer->skb = skb;
@@ -1310,12 +1410,45 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1310 skb->next = next_buffer->skb; 1410 skb->next = next_buffer->skb;
1311 skb->next->prev = skb; 1411 skb->next->prev = skb;
1312 } 1412 }
1313 rx_ring->non_eop_descs++; 1413 rx_ring->rx_stats.non_eop_descs++;
1314 goto next_desc; 1414 goto next_desc;
1315 } 1415 }
1316 1416
1417 if (skb->prev) {
1418 skb = ixgbe_transform_rsc_queue(skb);
1419 /* if we got here without RSC the packet is invalid */
1420 if (!pkt_is_rsc) {
1421 __pskb_trim(skb, 0);
1422 rx_buffer_info->skb = skb;
1423 goto next_desc;
1424 }
1425 }
1426
1427 if (ring_is_rsc_enabled(rx_ring)) {
1428 if (IXGBE_RSC_CB(skb)->delay_unmap) {
1429 dma_unmap_single(rx_ring->dev,
1430 IXGBE_RSC_CB(skb)->dma,
1431 rx_ring->rx_buf_len,
1432 DMA_FROM_DEVICE);
1433 IXGBE_RSC_CB(skb)->dma = 0;
1434 IXGBE_RSC_CB(skb)->delay_unmap = false;
1435 }
1436 }
1437 if (pkt_is_rsc) {
1438 if (ring_is_ps_enabled(rx_ring))
1439 rx_ring->rx_stats.rsc_count +=
1440 skb_shinfo(skb)->nr_frags;
1441 else
1442 rx_ring->rx_stats.rsc_count +=
1443 IXGBE_RSC_CB(skb)->skb_cnt;
1444 rx_ring->rx_stats.rsc_flush++;
1445 }
1446
1447 /* ERR_MASK will only have valid bits if EOP set */
1317 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { 1448 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
1318 dev_kfree_skb_irq(skb); 1449 /* trim packet back to size 0 and recycle it */
1450 __pskb_trim(skb, 0);
1451 rx_buffer_info->skb = skb;
1319 goto next_desc; 1452 goto next_desc;
1320 } 1453 }
1321 1454
@@ -1325,7 +1458,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1325 total_rx_bytes += skb->len; 1458 total_rx_bytes += skb->len;
1326 total_rx_packets++; 1459 total_rx_packets++;
1327 1460
1328 skb->protocol = eth_type_trans(skb, adapter->netdev); 1461 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1329#ifdef IXGBE_FCOE 1462#ifdef IXGBE_FCOE
1330 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 1463 /* if ddp, not passing to ULD unless for FCP_RSP or error */
1331 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 1464 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
@@ -1339,16 +1472,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1339next_desc: 1472next_desc:
1340 rx_desc->wb.upper.status_error = 0; 1473 rx_desc->wb.upper.status_error = 0;
1341 1474
1475 (*work_done)++;
1476 if (*work_done >= work_to_do)
1477 break;
1478
1342 /* return some buffers to hardware, one at a time is too slow */ 1479 /* return some buffers to hardware, one at a time is too slow */
1343 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { 1480 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
1344 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 1481 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1345 cleaned_count = 0; 1482 cleaned_count = 0;
1346 } 1483 }
1347 1484
1348 /* use prefetched values */ 1485 /* use prefetched values */
1349 rx_desc = next_rxd; 1486 rx_desc = next_rxd;
1350 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1351
1352 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1487 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1353 } 1488 }
1354 1489
@@ -1356,14 +1491,14 @@ next_desc:
1356 cleaned_count = IXGBE_DESC_UNUSED(rx_ring); 1491 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
1357 1492
1358 if (cleaned_count) 1493 if (cleaned_count)
1359 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 1494 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1360 1495
1361#ifdef IXGBE_FCOE 1496#ifdef IXGBE_FCOE
1362 /* include DDPed FCoE data */ 1497 /* include DDPed FCoE data */
1363 if (ddp_bytes > 0) { 1498 if (ddp_bytes > 0) {
1364 unsigned int mss; 1499 unsigned int mss;
1365 1500
1366 mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) - 1501 mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
1367 sizeof(struct fc_frame_header) - 1502 sizeof(struct fc_frame_header) -
1368 sizeof(struct fcoe_crc_eof); 1503 sizeof(struct fcoe_crc_eof);
1369 if (mss > 512) 1504 if (mss > 512)
@@ -1375,8 +1510,10 @@ next_desc:
1375 1510
1376 rx_ring->total_packets += total_rx_packets; 1511 rx_ring->total_packets += total_rx_packets;
1377 rx_ring->total_bytes += total_rx_bytes; 1512 rx_ring->total_bytes += total_rx_bytes;
1378 1513 u64_stats_update_begin(&rx_ring->syncp);
1379 return cleaned; 1514 rx_ring->stats.packets += total_rx_packets;
1515 rx_ring->stats.bytes += total_rx_bytes;
1516 u64_stats_update_end(&rx_ring->syncp);
1380} 1517}
1381 1518
1382static int ixgbe_clean_rxonly(struct napi_struct *, int); 1519static int ixgbe_clean_rxonly(struct napi_struct *, int);
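
The hunks above put the per-ring packet and byte counters under u64_stats_update_begin()/_end(). On 64-bit builds those helpers compile away; on 32-bit SMP they wrap the update in a sequence count so a reader, such as a get_stats64 handler, can detect a torn 64-bit update and retry. A single-threaded model of that retry discipline; ring_stats, stats_update and stats_read are invented here, and the real kernel helpers also supply the required memory barriers:

#include <stdint.h>
#include <stdio.h>

struct ring_stats {
        unsigned int seq;          /* odd while an update is in flight */
        uint64_t packets;
        uint64_t bytes;
};

static void stats_update(struct ring_stats *s, uint64_t pkts, uint64_t len)
{
        s->seq++;                  /* begin: seq becomes odd        */
        s->packets += pkts;
        s->bytes += len;
        s->seq++;                  /* end: seq becomes even again   */
}

static void stats_read(const struct ring_stats *s, uint64_t *pkts, uint64_t *len)
{
        unsigned int start;

        do {
                do {               /* wait for any in-flight update */
                        start = s->seq;
                } while (start & 1);
                *pkts = s->packets;
                *len  = s->bytes;
        } while (start != s->seq); /* retry if an update raced with us */
}

int main(void)
{
        struct ring_stats s = { 0, 0, 0 };
        uint64_t p, b;

        stats_update(&s, 3, 4500);
        stats_read(&s, &p, &b);
        printf("%llu packets, %llu bytes\n",
               (unsigned long long)p, (unsigned long long)b);
        return 0;
}
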
@@ -1390,7 +1527,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
1390static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) 1527static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1391{ 1528{
1392 struct ixgbe_q_vector *q_vector; 1529 struct ixgbe_q_vector *q_vector;
1393 int i, j, q_vectors, v_idx, r_idx; 1530 int i, q_vectors, v_idx, r_idx;
1394 u32 mask; 1531 u32 mask;
1395 1532
1396 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1533 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1406,8 +1543,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1406 adapter->num_rx_queues); 1543 adapter->num_rx_queues);
1407 1544
1408 for (i = 0; i < q_vector->rxr_count; i++) { 1545 for (i = 0; i < q_vector->rxr_count; i++) {
1409 j = adapter->rx_ring[r_idx]->reg_idx; 1546 u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
1410 ixgbe_set_ivar(adapter, 0, j, v_idx); 1547 ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
1411 r_idx = find_next_bit(q_vector->rxr_idx, 1548 r_idx = find_next_bit(q_vector->rxr_idx,
1412 adapter->num_rx_queues, 1549 adapter->num_rx_queues,
1413 r_idx + 1); 1550 r_idx + 1);
@@ -1416,8 +1553,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1416 adapter->num_tx_queues); 1553 adapter->num_tx_queues);
1417 1554
1418 for (i = 0; i < q_vector->txr_count; i++) { 1555 for (i = 0; i < q_vector->txr_count; i++) {
1419 j = adapter->tx_ring[r_idx]->reg_idx; 1556 u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
1420 ixgbe_set_ivar(adapter, 1, j, v_idx); 1557 ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
1421 r_idx = find_next_bit(q_vector->txr_idx, 1558 r_idx = find_next_bit(q_vector->txr_idx,
1422 adapter->num_tx_queues, 1559 adapter->num_tx_queues,
1423 r_idx + 1); 1560 r_idx + 1);
@@ -1448,11 +1585,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1448 } 1585 }
1449 } 1586 }
1450 1587
1451 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 1588 switch (adapter->hw.mac.type) {
1589 case ixgbe_mac_82598EB:
1452 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, 1590 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
1453 v_idx); 1591 v_idx);
1454 else if (adapter->hw.mac.type == ixgbe_mac_82599EB) 1592 break;
1593 case ixgbe_mac_82599EB:
1594 case ixgbe_mac_X540:
1455 ixgbe_set_ivar(adapter, -1, 1, v_idx); 1595 ixgbe_set_ivar(adapter, -1, 1, v_idx);
1596 break;
1597
1598 default:
1599 break;
1600 }
1456 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 1601 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
1457 1602
1458 /* set up to autoclear timer, and the vectors */ 1603 /* set up to autoclear timer, and the vectors */
@@ -1548,12 +1693,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1548 int v_idx = q_vector->v_idx; 1693 int v_idx = q_vector->v_idx;
1549 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); 1694 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1550 1695
1551 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1696 switch (adapter->hw.mac.type) {
1697 case ixgbe_mac_82598EB:
1552 /* must write high and low 16 bits to reset counter */ 1698 /* must write high and low 16 bits to reset counter */
1553 itr_reg |= (itr_reg << 16); 1699 itr_reg |= (itr_reg << 16);
1554 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1700 break;
1701 case ixgbe_mac_82599EB:
1702 case ixgbe_mac_X540:
1555 /* 1703 /*
1556 * 82599 can support a value of zero, so allow it for 1704 * 82599 and X540 can support a value of zero, so allow it for
1557 * max interrupt rate, but there is an errata where it can 1705 * max interrupt rate, but there is an errata where it can
1558 * not be zero with RSC 1706 * not be zero with RSC
1559 */ 1707 */
@@ -1566,6 +1714,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1566 * immediate assertion of the interrupt 1714 * immediate assertion of the interrupt
1567 */ 1715 */
1568 itr_reg |= IXGBE_EITR_CNT_WDIS; 1716 itr_reg |= IXGBE_EITR_CNT_WDIS;
1717 break;
1718 default:
1719 break;
1569 } 1720 }
1570 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); 1721 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1571} 1722}
@@ -1573,14 +1724,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1573static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) 1724static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1574{ 1725{
1575 struct ixgbe_adapter *adapter = q_vector->adapter; 1726 struct ixgbe_adapter *adapter = q_vector->adapter;
1727 int i, r_idx;
1576 u32 new_itr; 1728 u32 new_itr;
1577 u8 current_itr, ret_itr; 1729 u8 current_itr, ret_itr;
1578 int i, r_idx;
1579 struct ixgbe_ring *rx_ring, *tx_ring;
1580 1730
1581 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1731 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1582 for (i = 0; i < q_vector->txr_count; i++) { 1732 for (i = 0; i < q_vector->txr_count; i++) {
1583 tx_ring = adapter->tx_ring[r_idx]; 1733 struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
1584 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1734 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1585 q_vector->tx_itr, 1735 q_vector->tx_itr,
1586 tx_ring->total_packets, 1736 tx_ring->total_packets,
@@ -1595,7 +1745,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1595 1745
1596 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1746 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1597 for (i = 0; i < q_vector->rxr_count; i++) { 1747 for (i = 0; i < q_vector->rxr_count; i++) {
1598 rx_ring = adapter->rx_ring[r_idx]; 1748 struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
1599 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1749 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1600 q_vector->rx_itr, 1750 q_vector->rx_itr,
1601 rx_ring->total_packets, 1751 rx_ring->total_packets,
@@ -1626,7 +1776,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1626 1776
1627 if (new_itr != q_vector->eitr) { 1777 if (new_itr != q_vector->eitr) {
1628 /* do an exponential smoothing */ 1778 /* do an exponential smoothing */
1629 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 1779 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
1630 1780
1631 /* save the algorithm value here, not the smoothed one */ 1781 /* save the algorithm value here, not the smoothed one */
1632 q_vector->eitr = new_itr; 1782 q_vector->eitr = new_itr;
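
The ITR smoothing rewrite above keeps the same 90/10 exponential moving average but folds it into a single division, ((eitr * 9) + new_itr) / 10, which truncates less than dividing each term by 100 and 10 separately. A quick comparison of the two forms:

#include <stdio.h>

int main(void)
{
        static const unsigned int cur[]  = { 8000, 955, 20000 };
        static const unsigned int want[] = { 3000, 109, 8000 };
        int i;

        for (i = 0; i < 3; i++) {
                unsigned int old_way = (cur[i] * 90) / 100 + (want[i] * 10) / 100;
                unsigned int new_way = (cur[i] * 9 + want[i]) / 10;

                printf("eitr=%u target=%u  old=%u new=%u\n",
                       cur[i], want[i], old_way, new_way);
        }
        return 0;
}

The two agree except where both per-term divisions round down, as in the middle case (869 versus 870 ints/sec).
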
@@ -1694,17 +1844,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1694{ 1844{
1695 struct ixgbe_hw *hw = &adapter->hw; 1845 struct ixgbe_hw *hw = &adapter->hw;
1696 1846
1847 if (eicr & IXGBE_EICR_GPI_SDP2) {
1848 /* Clear the interrupt */
1849 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1850 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1851 schedule_work(&adapter->sfp_config_module_task);
1852 }
1853
1697 if (eicr & IXGBE_EICR_GPI_SDP1) { 1854 if (eicr & IXGBE_EICR_GPI_SDP1) {
1698 /* Clear the interrupt */ 1855 /* Clear the interrupt */
1699 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 1856 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1700 schedule_work(&adapter->multispeed_fiber_task); 1857 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1701 } else if (eicr & IXGBE_EICR_GPI_SDP2) { 1858 schedule_work(&adapter->multispeed_fiber_task);
1702 /* Clear the interrupt */
1703 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1704 schedule_work(&adapter->sfp_config_module_task);
1705 } else {
1706 /* Interrupt isn't for us... */
1707 return;
1708 } 1859 }
1709} 1860}
1710 1861
@@ -1744,16 +1895,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1744 if (eicr & IXGBE_EICR_MAILBOX) 1895 if (eicr & IXGBE_EICR_MAILBOX)
1745 ixgbe_msg_task(adapter); 1896 ixgbe_msg_task(adapter);
1746 1897
1747 if (hw->mac.type == ixgbe_mac_82598EB) 1898 switch (hw->mac.type) {
1748 ixgbe_check_fan_failure(adapter, eicr); 1899 case ixgbe_mac_82599EB:
1749 1900 case ixgbe_mac_X540:
1750 if (hw->mac.type == ixgbe_mac_82599EB) {
1751 ixgbe_check_sfp_event(adapter, eicr);
1752 adapter->interrupt_event = eicr;
1753 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1754 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
1755 schedule_work(&adapter->check_overtemp_task);
1756
1757 /* Handle Flow Director Full threshold interrupt */ 1901 /* Handle Flow Director Full threshold interrupt */
1758 if (eicr & IXGBE_EICR_FLOW_DIR) { 1902 if (eicr & IXGBE_EICR_FLOW_DIR) {
1759 int i; 1903 int i;
@@ -1763,12 +1907,24 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1763 for (i = 0; i < adapter->num_tx_queues; i++) { 1907 for (i = 0; i < adapter->num_tx_queues; i++) {
1764 struct ixgbe_ring *tx_ring = 1908 struct ixgbe_ring *tx_ring =
1765 adapter->tx_ring[i]; 1909 adapter->tx_ring[i];
1766 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, 1910 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
1767 &tx_ring->reinit_state)) 1911 &tx_ring->state))
1768 schedule_work(&adapter->fdir_reinit_task); 1912 schedule_work(&adapter->fdir_reinit_task);
1769 } 1913 }
1770 } 1914 }
1915 ixgbe_check_sfp_event(adapter, eicr);
1916 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1917 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
1918 adapter->interrupt_event = eicr;
1919 schedule_work(&adapter->check_overtemp_task);
1920 }
1921 break;
1922 default:
1923 break;
1771 } 1924 }
1925
1926 ixgbe_check_fan_failure(adapter, eicr);
1927
1772 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1928 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1773 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1929 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1774 1930
@@ -1779,15 +1935,24 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1779 u64 qmask) 1935 u64 qmask)
1780{ 1936{
1781 u32 mask; 1937 u32 mask;
1938 struct ixgbe_hw *hw = &adapter->hw;
1782 1939
1783 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1940 switch (hw->mac.type) {
1941 case ixgbe_mac_82598EB:
1784 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 1942 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1785 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1943 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1786 } else { 1944 break;
1945 case ixgbe_mac_82599EB:
1946 case ixgbe_mac_X540:
1787 mask = (qmask & 0xFFFFFFFF); 1947 mask = (qmask & 0xFFFFFFFF);
1788 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); 1948 if (mask)
1949 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1789 mask = (qmask >> 32); 1950 mask = (qmask >> 32);
1790 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); 1951 if (mask)
1952 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1953 break;
1954 default:
1955 break;
1791 } 1956 }
1792 /* skip the flush */ 1957 /* skip the flush */
1793} 1958}
@@ -1796,15 +1961,24 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1796 u64 qmask) 1961 u64 qmask)
1797{ 1962{
1798 u32 mask; 1963 u32 mask;
1964 struct ixgbe_hw *hw = &adapter->hw;
1799 1965
1800 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1966 switch (hw->mac.type) {
1967 case ixgbe_mac_82598EB:
1801 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 1968 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1802 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); 1969 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1803 } else { 1970 break;
1971 case ixgbe_mac_82599EB:
1972 case ixgbe_mac_X540:
1804 mask = (qmask & 0xFFFFFFFF); 1973 mask = (qmask & 0xFFFFFFFF);
1805 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask); 1974 if (mask)
1975 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1806 mask = (qmask >> 32); 1976 mask = (qmask >> 32);
1807 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask); 1977 if (mask)
1978 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1979 break;
1980 default:
1981 break;
1808 } 1982 }
1809 /* skip the flush */ 1983 /* skip the flush */
1810} 1984}
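
ixgbe_irq_enable_queues() and ixgbe_irq_disable_queues() above are recast as switches on the MAC type; for 82599/X540 the 64-bit queue mask is still split across the two 32-bit EIMS/EIMC_EX registers, but a half that is zero no longer triggers a register write. A mocked sketch of that split (write_reg and irq_disable_queues stand in for the MMIO macro and the driver function):

#include <stdint.h>
#include <stdio.h>

static void write_reg(const char *name, int idx, uint32_t val)
{
        printf("  %s(%d) <- 0x%08x\n", name, idx, (unsigned int)val);
}

static void irq_disable_queues(uint64_t qmask)
{
        uint32_t mask;

        mask = (uint32_t)(qmask & 0xFFFFFFFF);
        if (mask)                              /* skip the write if empty */
                write_reg("EIMC_EX", 0, mask);
        mask = (uint32_t)(qmask >> 32);
        if (mask)
                write_reg("EIMC_EX", 1, mask);
}

int main(void)
{
        printf("qmask = 1ULL << 3:\n");
        irq_disable_queues(1ULL << 3);         /* touches only EIMC_EX(0) */
        printf("qmask = 1ULL << 40:\n");
        irq_disable_queues(1ULL << 40);        /* touches only EIMC_EX(1) */
        return 0;
}
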
@@ -1847,8 +2021,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1847 int r_idx; 2021 int r_idx;
1848 int i; 2022 int i;
1849 2023
2024#ifdef CONFIG_IXGBE_DCA
2025 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2026 ixgbe_update_dca(q_vector);
2027#endif
2028
1850 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2029 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1851 for (i = 0; i < q_vector->rxr_count; i++) { 2030 for (i = 0; i < q_vector->rxr_count; i++) {
1852 rx_ring = adapter->rx_ring[r_idx]; 2031 rx_ring = adapter->rx_ring[r_idx];
1853 rx_ring->total_bytes = 0; 2032 rx_ring->total_bytes = 0;
1854 rx_ring->total_packets = 0; 2033 rx_ring->total_packets = 0;
@@ -1859,7 +2038,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1859 if (!q_vector->rxr_count) 2038 if (!q_vector->rxr_count)
1860 return IRQ_HANDLED; 2039 return IRQ_HANDLED;
1861 2040
1862 /* disable interrupts on this vector only */
1863 /* EIAM disabled interrupts (on this vector) for us */ 2041 /* EIAM disabled interrupts (on this vector) for us */
1864 napi_schedule(&q_vector->napi); 2042 napi_schedule(&q_vector->napi);
1865 2043
@@ -1918,13 +2096,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1918 int work_done = 0; 2096 int work_done = 0;
1919 long r_idx; 2097 long r_idx;
1920 2098
1921 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1922 rx_ring = adapter->rx_ring[r_idx];
1923#ifdef CONFIG_IXGBE_DCA 2099#ifdef CONFIG_IXGBE_DCA
1924 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2100 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1925 ixgbe_update_rx_dca(adapter, rx_ring); 2101 ixgbe_update_dca(q_vector);
1926#endif 2102#endif
1927 2103
2104 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2105 rx_ring = adapter->rx_ring[r_idx];
2106
1928 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); 2107 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1929 2108
1930 /* If all Rx work done, exit the polling mode */ 2109 /* If all Rx work done, exit the polling mode */
@@ -1958,13 +2137,14 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1958 long r_idx; 2137 long r_idx;
1959 bool tx_clean_complete = true; 2138 bool tx_clean_complete = true;
1960 2139
2140#ifdef CONFIG_IXGBE_DCA
2141 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2142 ixgbe_update_dca(q_vector);
2143#endif
2144
1961 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 2145 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1962 for (i = 0; i < q_vector->txr_count; i++) { 2146 for (i = 0; i < q_vector->txr_count; i++) {
1963 ring = adapter->tx_ring[r_idx]; 2147 ring = adapter->tx_ring[r_idx];
1964#ifdef CONFIG_IXGBE_DCA
1965 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1966 ixgbe_update_tx_dca(adapter, ring);
1967#endif
1968 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); 2148 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1969 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 2149 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1970 r_idx + 1); 2150 r_idx + 1);
@@ -1977,10 +2157,6 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1977 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2157 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1978 for (i = 0; i < q_vector->rxr_count; i++) { 2158 for (i = 0; i < q_vector->rxr_count; i++) {
1979 ring = adapter->rx_ring[r_idx]; 2159 ring = adapter->rx_ring[r_idx];
1980#ifdef CONFIG_IXGBE_DCA
1981 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1982 ixgbe_update_rx_dca(adapter, ring);
1983#endif
1984 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); 2160 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
1985 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 2161 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1986 r_idx + 1); 2162 r_idx + 1);
@@ -2019,13 +2195,14 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2019 int work_done = 0; 2195 int work_done = 0;
2020 long r_idx; 2196 long r_idx;
2021 2197
2022 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2023 tx_ring = adapter->tx_ring[r_idx];
2024#ifdef CONFIG_IXGBE_DCA 2198#ifdef CONFIG_IXGBE_DCA
2025 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2199 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2026 ixgbe_update_tx_dca(adapter, tx_ring); 2200 ixgbe_update_dca(q_vector);
2027#endif 2201#endif
2028 2202
2203 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2204 tx_ring = adapter->tx_ring[r_idx];
2205
2029 if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) 2206 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2030 work_done = budget; 2207 work_done = budget;
2031 2208
@@ -2046,24 +2223,27 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
2046 int r_idx) 2223 int r_idx)
2047{ 2224{
2048 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2225 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2226 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
2049 2227
2050 set_bit(r_idx, q_vector->rxr_idx); 2228 set_bit(r_idx, q_vector->rxr_idx);
2051 q_vector->rxr_count++; 2229 q_vector->rxr_count++;
2230 rx_ring->q_vector = q_vector;
2052} 2231}
2053 2232
2054static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 2233static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2055 int t_idx) 2234 int t_idx)
2056{ 2235{
2057 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2236 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2237 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
2058 2238
2059 set_bit(t_idx, q_vector->txr_idx); 2239 set_bit(t_idx, q_vector->txr_idx);
2060 q_vector->txr_count++; 2240 q_vector->txr_count++;
2241 tx_ring->q_vector = q_vector;
2061} 2242}
2062 2243
2063/** 2244/**
2064 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors 2245 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2065 * @adapter: board private structure to initialize 2246 * @adapter: board private structure to initialize
2066 * @vectors: allotted vector count for descriptor rings
2067 * 2247 *
2068 * This function maps descriptor rings to the queue-specific vectors 2248 * This function maps descriptor rings to the queue-specific vectors
2069 * we were allotted through the MSI-X enabling code. Ideally, we'd have 2249 * we were allotted through the MSI-X enabling code. Ideally, we'd have
@@ -2071,9 +2251,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2071 * group the rings as "efficiently" as possible. You would add new 2251 * group the rings as "efficiently" as possible. You would add new
2072 * mapping configurations in here. 2252 * mapping configurations in here.
2073 **/ 2253 **/
2074static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, 2254static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
2075 int vectors)
2076{ 2255{
2256 int q_vectors;
2077 int v_start = 0; 2257 int v_start = 0;
2078 int rxr_idx = 0, txr_idx = 0; 2258 int rxr_idx = 0, txr_idx = 0;
2079 int rxr_remaining = adapter->num_rx_queues; 2259 int rxr_remaining = adapter->num_rx_queues;
@@ -2086,11 +2266,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2086 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 2266 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2087 goto out; 2267 goto out;
2088 2268
2269 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2270
2089 /* 2271 /*
2090 * The ideal configuration... 2272 * The ideal configuration...
2091 * We have enough vectors to map one per queue. 2273 * We have enough vectors to map one per queue.
2092 */ 2274 */
2093 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 2275 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
2094 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 2276 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2095 map_vector_to_rxq(adapter, v_start, rxr_idx); 2277 map_vector_to_rxq(adapter, v_start, rxr_idx);
2096 2278
@@ -2106,23 +2288,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2106 * multiple queues per vector. 2288 * multiple queues per vector.
2107 */ 2289 */
2108 /* Re-adjusting *qpv takes care of the remainder. */ 2290 /* Re-adjusting *qpv takes care of the remainder. */
2109 for (i = v_start; i < vectors; i++) { 2291 for (i = v_start; i < q_vectors; i++) {
2110 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); 2292 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
2111 for (j = 0; j < rqpv; j++) { 2293 for (j = 0; j < rqpv; j++) {
2112 map_vector_to_rxq(adapter, i, rxr_idx); 2294 map_vector_to_rxq(adapter, i, rxr_idx);
2113 rxr_idx++; 2295 rxr_idx++;
2114 rxr_remaining--; 2296 rxr_remaining--;
2115 } 2297 }
2116 } 2298 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
2117 for (i = v_start; i < vectors; i++) {
2118 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
2119 for (j = 0; j < tqpv; j++) { 2299 for (j = 0; j < tqpv; j++) {
2120 map_vector_to_txq(adapter, i, txr_idx); 2300 map_vector_to_txq(adapter, i, txr_idx);
2121 txr_idx++; 2301 txr_idx++;
2122 txr_remaining--; 2302 txr_remaining--;
2123 } 2303 }
2124 } 2304 }
2125
2126out: 2305out:
2127 return err; 2306 return err;
2128} 2307}
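The mapping hunk above folds the Tx distribution into the same vector loop as Rx: each pass hands DIV_ROUND_UP(remaining, vectors_left) rings to the current vector, so the remainder is spread evenly without a second loop. A small standalone sketch of that arithmetic; the queue and vector counts here are made up for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int q_vectors = 4, rxr_remaining = 10, txr_remaining = 6;
        int rxr_idx = 0, txr_idx = 0;

        for (int i = 0; i < q_vectors; i++) {
                int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
                for (int j = 0; j < rqpv; j++, rxr_idx++, rxr_remaining--)
                        printf("vector %d <- rx ring %d\n", i, rxr_idx);

                int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
                for (int j = 0; j < tqpv; j++, txr_idx++, txr_remaining--)
                        printf("vector %d <- tx ring %d\n", i, txr_idx);
        }
        return 0;
}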
@@ -2144,30 +2323,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2144 /* Decrement for Other and TCP Timer vectors */ 2323 /* Decrement for Other and TCP Timer vectors */
2145 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2324 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2146 2325
2147 /* Map the Tx/Rx rings to the vectors we were allotted. */ 2326 err = ixgbe_map_rings_to_vectors(adapter);
2148 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
2149 if (err) 2327 if (err)
2150 goto out; 2328 return err;
2151 2329
2152#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ 2330#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
2153 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 2331 ? &ixgbe_msix_clean_many : \
2154 &ixgbe_msix_clean_many) 2332 (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
2333 (_v)->txr_count ? &ixgbe_msix_clean_tx : \
2334 NULL)
2155 for (vector = 0; vector < q_vectors; vector++) { 2335 for (vector = 0; vector < q_vectors; vector++) {
2156 handler = SET_HANDLER(adapter->q_vector[vector]); 2336 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2337 handler = SET_HANDLER(q_vector);
2157 2338
2158 if (handler == &ixgbe_msix_clean_rx) { 2339 if (handler == &ixgbe_msix_clean_rx) {
2159 sprintf(adapter->name[vector], "%s-%s-%d", 2340 sprintf(q_vector->name, "%s-%s-%d",
2160 netdev->name, "rx", ri++); 2341 netdev->name, "rx", ri++);
2161 } else if (handler == &ixgbe_msix_clean_tx) { 2342 } else if (handler == &ixgbe_msix_clean_tx) {
2162 sprintf(adapter->name[vector], "%s-%s-%d", 2343 sprintf(q_vector->name, "%s-%s-%d",
2163 netdev->name, "tx", ti++); 2344 netdev->name, "tx", ti++);
2164 } else 2345 } else if (handler == &ixgbe_msix_clean_many) {
2165 sprintf(adapter->name[vector], "%s-%s-%d", 2346 sprintf(q_vector->name, "%s-%s-%d",
2166 netdev->name, "TxRx", vector); 2347 netdev->name, "TxRx", ri++);
2167 2348 ti++;
2349 } else {
2350 /* skip this unused q_vector */
2351 continue;
2352 }
2168 err = request_irq(adapter->msix_entries[vector].vector, 2353 err = request_irq(adapter->msix_entries[vector].vector,
2169 handler, 0, adapter->name[vector], 2354 handler, 0, q_vector->name,
2170 adapter->q_vector[vector]); 2355 q_vector);
2171 if (err) { 2356 if (err) {
2172 e_err(probe, "request_irq failed for MSIX interrupt " 2357 e_err(probe, "request_irq failed for MSIX interrupt "
2173 "Error: %d\n", err); 2358 "Error: %d\n", err);
@@ -2175,9 +2360,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2175 } 2360 }
2176 } 2361 }
2177 2362
2178 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 2363 sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
2179 err = request_irq(adapter->msix_entries[vector].vector, 2364 err = request_irq(adapter->msix_entries[vector].vector,
2180 ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 2365 ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
2181 if (err) { 2366 if (err) {
2182 e_err(probe, "request_irq for msix_lsc failed: %d\n", err); 2367 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
2183 goto free_queue_irqs; 2368 goto free_queue_irqs;
@@ -2193,17 +2378,16 @@ free_queue_irqs:
2193 pci_disable_msix(adapter->pdev); 2378 pci_disable_msix(adapter->pdev);
2194 kfree(adapter->msix_entries); 2379 kfree(adapter->msix_entries);
2195 adapter->msix_entries = NULL; 2380 adapter->msix_entries = NULL;
2196out:
2197 return err; 2381 return err;
2198} 2382}
2199 2383
2200static void ixgbe_set_itr(struct ixgbe_adapter *adapter) 2384static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2201{ 2385{
2202 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 2386 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2203 u8 current_itr;
2204 u32 new_itr = q_vector->eitr;
2205 struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; 2387 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
2206 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 2388 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
2389 u32 new_itr = q_vector->eitr;
2390 u8 current_itr;
2207 2391
2208 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, 2392 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
2209 q_vector->tx_itr, 2393 q_vector->tx_itr,
@@ -2233,9 +2417,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2233 2417
2234 if (new_itr != q_vector->eitr) { 2418 if (new_itr != q_vector->eitr) {
2235 /* do an exponential smoothing */ 2419 /* do an exponential smoothing */
2236 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 2420 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
2237 2421
2238 /* save the algorithm value here, not the smoothed one */ 2422 /* save the algorithm value here */
2239 q_vector->eitr = new_itr; 2423 q_vector->eitr = new_itr;
2240 2424
2241 ixgbe_write_eitr(q_vector); 2425 ixgbe_write_eitr(q_vector);
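The smoothing change above replaces ((eitr * 90)/100) + ((new_itr * 10)/100) with ((eitr * 9) + new_itr)/10, i.e. the same 90/10 exponential moving average computed with a single division so less precision is lost to integer truncation. A short sketch showing how the EITR value converges toward a new sample; the starting values are arbitrary:

#include <stdint.h>
#include <stdio.h>

/* 90/10 exponential smoothing, in the single-division form used above */
static uint32_t smooth_itr(uint32_t old_itr, uint32_t new_itr)
{
        return ((old_itr * 9) + new_itr) / 10;
}

int main(void)
{
        uint32_t itr = 8000;                 /* arbitrary starting EITR value */
        uint32_t sample = 2000;              /* arbitrary per-interval target */

        for (int step = 0; step < 5; step++) {
                itr = smooth_itr(itr, sample);
                printf("step %d: itr = %u\n", step, itr);
        }
        return 0;
}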
@@ -2256,12 +2440,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2256 mask |= IXGBE_EIMS_GPI_SDP0; 2440 mask |= IXGBE_EIMS_GPI_SDP0;
2257 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 2441 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2258 mask |= IXGBE_EIMS_GPI_SDP1; 2442 mask |= IXGBE_EIMS_GPI_SDP1;
2259 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 2443 switch (adapter->hw.mac.type) {
2444 case ixgbe_mac_82599EB:
2445 case ixgbe_mac_X540:
2260 mask |= IXGBE_EIMS_ECC; 2446 mask |= IXGBE_EIMS_ECC;
2261 mask |= IXGBE_EIMS_GPI_SDP1; 2447 mask |= IXGBE_EIMS_GPI_SDP1;
2262 mask |= IXGBE_EIMS_GPI_SDP2; 2448 mask |= IXGBE_EIMS_GPI_SDP2;
2263 if (adapter->num_vfs) 2449 if (adapter->num_vfs)
2264 mask |= IXGBE_EIMS_MAILBOX; 2450 mask |= IXGBE_EIMS_MAILBOX;
2451 break;
2452 default:
2453 break;
2265 } 2454 }
2266 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 2455 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2267 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 2456 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2317,13 +2506,21 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2317 if (eicr & IXGBE_EICR_LSC) 2506 if (eicr & IXGBE_EICR_LSC)
2318 ixgbe_check_lsc(adapter); 2507 ixgbe_check_lsc(adapter);
2319 2508
2320 if (hw->mac.type == ixgbe_mac_82599EB) 2509 switch (hw->mac.type) {
2510 case ixgbe_mac_82599EB:
2511 case ixgbe_mac_X540:
2321 ixgbe_check_sfp_event(adapter, eicr); 2512 ixgbe_check_sfp_event(adapter, eicr);
2513 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2514 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
2515 adapter->interrupt_event = eicr;
2516 schedule_work(&adapter->check_overtemp_task);
2517 }
2518 break;
2519 default:
2520 break;
2521 }
2322 2522
2323 ixgbe_check_fan_failure(adapter, eicr); 2523 ixgbe_check_fan_failure(adapter, eicr);
2324 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2325 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
2326 schedule_work(&adapter->check_overtemp_task);
2327 2524
2328 if (napi_schedule_prep(&(q_vector->napi))) { 2525 if (napi_schedule_prep(&(q_vector->napi))) {
2329 adapter->tx_ring[0]->total_packets = 0; 2526 adapter->tx_ring[0]->total_packets = 0;
@@ -2416,14 +2613,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2416 **/ 2613 **/
2417static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 2614static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2418{ 2615{
2419 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2616 switch (adapter->hw.mac.type) {
2617 case ixgbe_mac_82598EB:
2420 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 2618 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2421 } else { 2619 break;
2620 case ixgbe_mac_82599EB:
2621 case ixgbe_mac_X540:
2422 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 2622 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2423 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 2623 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2424 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 2624 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2425 if (adapter->num_vfs > 32) 2625 if (adapter->num_vfs > 32)
2426 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); 2626 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
2627 break;
2628 default:
2629 break;
2427 } 2630 }
2428 IXGBE_WRITE_FLUSH(&adapter->hw); 2631 IXGBE_WRITE_FLUSH(&adapter->hw);
2429 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2632 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2469,7 +2672,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2469 u64 tdba = ring->dma; 2672 u64 tdba = ring->dma;
2470 int wait_loop = 10; 2673 int wait_loop = 10;
2471 u32 txdctl; 2674 u32 txdctl;
2472 u16 reg_idx = ring->reg_idx; 2675 u8 reg_idx = ring->reg_idx;
2473 2676
2474 /* disable queue to avoid issues while updating state */ 2677 /* disable queue to avoid issues while updating state */
2475 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); 2678 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
@@ -2484,8 +2687,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2484 ring->count * sizeof(union ixgbe_adv_tx_desc)); 2687 ring->count * sizeof(union ixgbe_adv_tx_desc));
2485 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); 2688 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2486 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); 2689 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2487 ring->head = IXGBE_TDH(reg_idx); 2690 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
2488 ring->tail = IXGBE_TDT(reg_idx);
2489 2691
2490 /* configure fetching thresholds */ 2692 /* configure fetching thresholds */
2491 if (adapter->rx_itr_setting == 0) { 2693 if (adapter->rx_itr_setting == 0) {
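The Tx ring hunk above stops caching register offsets in ring->head/ring->tail and instead stores the tail as a ready-to-use MMIO address (hw->hw_addr + IXGBE_TDT(reg_idx)), so the transmit hot path can bump the tail without recomputing the offset. A userspace sketch of the idea, using an in-memory array in place of the mapped BAR; the TDT() offset and write_tail() helper are illustrative, not the driver's accessors:

#include <stdint.h>
#include <stdio.h>

/* 64 KB of zeroed memory stands in for the mapped BAR (hw->hw_addr). */
static uint32_t bar0[0x4000];

/* Illustrative Tx tail offset; the real driver uses IXGBE_TDT(reg_idx). */
#define TDT(q)  (0x6018 + (q) * 0x40)

struct ring {
        uint8_t *tail;                       /* precomputed: hw_addr + TDT(reg_idx) */
        uint16_t next_to_use;
};

static void write_tail(struct ring *r, uint32_t val)
{
        *(volatile uint32_t *)r->tail = val; /* real code would use writel() */
}

int main(void)
{
        uint8_t *hw_addr = (uint8_t *)bar0;
        struct ring tx = { .tail = hw_addr + TDT(3) };

        tx.next_to_use = 42;
        write_tail(&tx, tx.next_to_use);     /* hot path: offset math already folded in */
        printf("TDT(3) now holds %u\n", bar0[TDT(3) / 4]);
        return 0;
}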
@@ -2501,7 +2703,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2501 } 2703 }
2502 2704
2503 /* reinitialize flowdirector state */ 2705 /* reinitialize flowdirector state */
2504 set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state); 2706 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2707 adapter->atr_sample_rate) {
2708 ring->atr_sample_rate = adapter->atr_sample_rate;
2709 ring->atr_count = 0;
2710 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2711 } else {
2712 ring->atr_sample_rate = 0;
2713 }
2714
2715 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2505 2716
2506 /* enable queue */ 2717 /* enable queue */
2507 txdctl |= IXGBE_TXDCTL_ENABLE; 2718 txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2592,16 +2803,22 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2592 struct ixgbe_ring *rx_ring) 2803 struct ixgbe_ring *rx_ring)
2593{ 2804{
2594 u32 srrctl; 2805 u32 srrctl;
2595 int index; 2806 u8 reg_idx = rx_ring->reg_idx;
2596 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2597 2807
2598 index = rx_ring->reg_idx; 2808 switch (adapter->hw.mac.type) {
2599 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2809 case ixgbe_mac_82598EB: {
2600 unsigned long mask; 2810 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2601 mask = (unsigned long) feature[RING_F_RSS].mask; 2811 const int mask = feature[RING_F_RSS].mask;
2602 index = index & mask; 2812 reg_idx = reg_idx & mask;
2603 } 2813 }
2604 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); 2814 break;
2815 case ixgbe_mac_82599EB:
2816 case ixgbe_mac_X540:
2817 default:
2818 break;
2819 }
2820
2821 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
2605 2822
2606 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 2823 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2607 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 2824 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
@@ -2611,7 +2828,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2611 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 2828 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2612 IXGBE_SRRCTL_BSIZEHDR_MASK; 2829 IXGBE_SRRCTL_BSIZEHDR_MASK;
2613 2830
2614 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 2831 if (ring_is_ps_enabled(rx_ring)) {
2615#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER 2832#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2616 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 2833 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2617#else 2834#else
@@ -2624,7 +2841,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2624 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2841 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2625 } 2842 }
2626 2843
2627 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); 2844 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
2628} 2845}
2629 2846
2630static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) 2847static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2694,19 +2911,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2694} 2911}
2695 2912
2696/** 2913/**
2914 * ixgbe_clear_rscctl - disable RSC for the indicated ring
2915 * @adapter: address of board private structure
2916 * @ring: structure containing ring specific data
2917 **/
2918void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
2919 struct ixgbe_ring *ring)
2920{
2921 struct ixgbe_hw *hw = &adapter->hw;
2922 u32 rscctrl;
2923 u8 reg_idx = ring->reg_idx;
2924
2925 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2926 rscctrl &= ~IXGBE_RSCCTL_RSCEN;
2927 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2928}
2929
2930/**
2697 * ixgbe_configure_rscctl - enable RSC for the indicated ring 2931 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2698 * @adapter: address of board private structure 2932 * @adapter: address of board private structure
 2699 * @ring: structure containing ring specific data 2933 * @ring: structure containing ring specific data
2700 **/ 2934 **/
2701static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, 2935void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2702 struct ixgbe_ring *ring) 2936 struct ixgbe_ring *ring)
2703{ 2937{
2704 struct ixgbe_hw *hw = &adapter->hw; 2938 struct ixgbe_hw *hw = &adapter->hw;
2705 u32 rscctrl; 2939 u32 rscctrl;
2706 int rx_buf_len; 2940 int rx_buf_len;
2707 u16 reg_idx = ring->reg_idx; 2941 u8 reg_idx = ring->reg_idx;
2708 2942
2709 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) 2943 if (!ring_is_rsc_enabled(ring))
2710 return; 2944 return;
2711 2945
2712 rx_buf_len = ring->rx_buf_len; 2946 rx_buf_len = ring->rx_buf_len;
@@ -2717,7 +2951,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2717 * total size of max desc * buf_len is not greater 2951 * total size of max desc * buf_len is not greater
2718 * than 65535 2952 * than 65535
2719 */ 2953 */
2720 if (ring->flags & IXGBE_RING_RX_PS_ENABLED) { 2954 if (ring_is_ps_enabled(ring)) {
2721#if (MAX_SKB_FRAGS > 16) 2955#if (MAX_SKB_FRAGS > 16)
2722 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 2956 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2723#elif (MAX_SKB_FRAGS > 8) 2957#elif (MAX_SKB_FRAGS > 8)
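ixgbe_clear_rscctl()/ixgbe_configure_rscctl() above toggle a single enable bit with a read-modify-write of the per-queue RSCCTL register, leaving the other fields (such as MAXDESC) untouched. A tiny sketch of that read-modify-write pattern; the register variable and the RSCCTL_RSCEN bit value are stand-ins, not the hardware layout:

#include <stdint.h>
#include <stdio.h>

#define RSCCTL_RSCEN    0x01                 /* illustrative bit position */

static uint32_t rscctl_reg;                  /* stands in for IXGBE_RSCCTL(reg_idx) */

static uint32_t read_reg(void)     { return rscctl_reg; }
static void write_reg(uint32_t v)  { rscctl_reg = v; }

static void clear_rscctl(void)
{
        uint32_t rscctrl = read_reg();
        rscctrl &= ~RSCCTL_RSCEN;            /* clear only the enable bit */
        write_reg(rscctrl);
}

static void configure_rscctl(void)
{
        uint32_t rscctrl = read_reg();
        rscctrl |= RSCCTL_RSCEN;             /* set only the enable bit */
        write_reg(rscctrl);
}

int main(void)
{
        rscctl_reg = 0xA0;                   /* pretend other fields are already set */
        configure_rscctl();
        printf("after enable:  0x%02x\n", rscctl_reg);
        clear_rscctl();
        printf("after disable: 0x%02x\n", rscctl_reg);
        return 0;
}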
@@ -2770,9 +3004,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2770 struct ixgbe_ring *ring) 3004 struct ixgbe_ring *ring)
2771{ 3005{
2772 struct ixgbe_hw *hw = &adapter->hw; 3006 struct ixgbe_hw *hw = &adapter->hw;
2773 int reg_idx = ring->reg_idx;
2774 int wait_loop = IXGBE_MAX_RX_DESC_POLL; 3007 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
2775 u32 rxdctl; 3008 u32 rxdctl;
3009 u8 reg_idx = ring->reg_idx;
2776 3010
2777 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ 3011 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2778 if (hw->mac.type == ixgbe_mac_82598EB && 3012 if (hw->mac.type == ixgbe_mac_82598EB &&
@@ -2796,7 +3030,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2796 struct ixgbe_hw *hw = &adapter->hw; 3030 struct ixgbe_hw *hw = &adapter->hw;
2797 u64 rdba = ring->dma; 3031 u64 rdba = ring->dma;
2798 u32 rxdctl; 3032 u32 rxdctl;
2799 u16 reg_idx = ring->reg_idx; 3033 u8 reg_idx = ring->reg_idx;
2800 3034
2801 /* disable queue to avoid issues while updating state */ 3035 /* disable queue to avoid issues while updating state */
2802 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3036 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
@@ -2810,8 +3044,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2810 ring->count * sizeof(union ixgbe_adv_rx_desc)); 3044 ring->count * sizeof(union ixgbe_adv_rx_desc));
2811 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); 3045 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
2812 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); 3046 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
2813 ring->head = IXGBE_RDH(reg_idx); 3047 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
2814 ring->tail = IXGBE_RDT(reg_idx);
2815 3048
2816 ixgbe_configure_srrctl(adapter, ring); 3049 ixgbe_configure_srrctl(adapter, ring);
2817 ixgbe_configure_rscctl(adapter, ring); 3050 ixgbe_configure_rscctl(adapter, ring);
@@ -2833,7 +3066,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2833 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); 3066 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
2834 3067
2835 ixgbe_rx_desc_queue_enable(adapter, ring); 3068 ixgbe_rx_desc_queue_enable(adapter, ring);
2836 ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring)); 3069 ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
2837} 3070}
2838 3071
2839static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) 3072static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -2956,24 +3189,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
2956 rx_ring->rx_buf_len = rx_buf_len; 3189 rx_ring->rx_buf_len = rx_buf_len;
2957 3190
2958 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) 3191 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2959 rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; 3192 set_ring_ps_enabled(rx_ring);
3193 else
3194 clear_ring_ps_enabled(rx_ring);
3195
3196 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3197 set_ring_rsc_enabled(rx_ring);
2960 else 3198 else
2961 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 3199 clear_ring_rsc_enabled(rx_ring);
2962 3200
2963#ifdef IXGBE_FCOE 3201#ifdef IXGBE_FCOE
2964 if (netdev->features & NETIF_F_FCOE_MTU) { 3202 if (netdev->features & NETIF_F_FCOE_MTU) {
2965 struct ixgbe_ring_feature *f; 3203 struct ixgbe_ring_feature *f;
2966 f = &adapter->ring_feature[RING_F_FCOE]; 3204 f = &adapter->ring_feature[RING_F_FCOE];
2967 if ((i >= f->mask) && (i < f->mask + f->indices)) { 3205 if ((i >= f->mask) && (i < f->mask + f->indices)) {
2968 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 3206 clear_ring_ps_enabled(rx_ring);
2969 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) 3207 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2970 rx_ring->rx_buf_len = 3208 rx_ring->rx_buf_len =
2971 IXGBE_FCOE_JUMBO_FRAME_SIZE; 3209 IXGBE_FCOE_JUMBO_FRAME_SIZE;
3210 } else if (!ring_is_rsc_enabled(rx_ring) &&
3211 !ring_is_ps_enabled(rx_ring)) {
3212 rx_ring->rx_buf_len =
3213 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2972 } 3214 }
2973 } 3215 }
2974#endif /* IXGBE_FCOE */ 3216#endif /* IXGBE_FCOE */
2975 } 3217 }
2976
2977} 3218}
2978 3219
2979static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) 3220static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
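The buffer-length hunk above moves per-ring properties (packet split, RSC) from adapter-wide flag masks into per-ring state bits manipulated through small set/clear/test helpers such as set_ring_ps_enabled() and ring_is_rsc_enabled(). A compact sketch of such helpers built on plain bit operations; the bit names and struct layout are assumptions for illustration, the driver's versions live in its header:

#include <stdio.h>

enum { RING_STATE_PS_ENABLED, RING_STATE_RSC_ENABLED };

struct ring { unsigned long state; };

#define set_ring_ps_enabled(r)    ((r)->state |=  (1UL << RING_STATE_PS_ENABLED))
#define clear_ring_ps_enabled(r)  ((r)->state &= ~(1UL << RING_STATE_PS_ENABLED))
#define ring_is_ps_enabled(r)     (!!((r)->state & (1UL << RING_STATE_PS_ENABLED)))
#define set_ring_rsc_enabled(r)   ((r)->state |=  (1UL << RING_STATE_RSC_ENABLED))
#define ring_is_rsc_enabled(r)    (!!((r)->state & (1UL << RING_STATE_RSC_ENABLED)))

int main(void)
{
        struct ring rx = { 0 };

        set_ring_ps_enabled(&rx);
        set_ring_rsc_enabled(&rx);
        clear_ring_ps_enabled(&rx);          /* e.g. the FCoE path above */
        printf("ps=%d rsc=%d\n", ring_is_ps_enabled(&rx), ring_is_rsc_enabled(&rx));
        return 0;
}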
@@ -2996,6 +3237,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
2996 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 3237 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2997 break; 3238 break;
2998 case ixgbe_mac_82599EB: 3239 case ixgbe_mac_82599EB:
3240 case ixgbe_mac_X540:
2999 /* Disable RSC for ACK packets */ 3241 /* Disable RSC for ACK packets */
3000 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 3242 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3001 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 3243 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -3123,6 +3365,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3123 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 3365 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3124 break; 3366 break;
3125 case ixgbe_mac_82599EB: 3367 case ixgbe_mac_82599EB:
3368 case ixgbe_mac_X540:
3126 for (i = 0; i < adapter->num_rx_queues; i++) { 3369 for (i = 0; i < adapter->num_rx_queues; i++) {
3127 j = adapter->rx_ring[i]->reg_idx; 3370 j = adapter->rx_ring[i]->reg_idx;
3128 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3371 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3152,6 +3395,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3152 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 3395 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3153 break; 3396 break;
3154 case ixgbe_mac_82599EB: 3397 case ixgbe_mac_82599EB:
3398 case ixgbe_mac_X540:
3155 for (i = 0; i < adapter->num_rx_queues; i++) { 3399 for (i = 0; i < adapter->num_rx_queues; i++) {
3156 j = adapter->rx_ring[i]->reg_idx; 3400 j = adapter->rx_ring[i]->reg_idx;
3157 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3401 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3349,8 +3593,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3349{ 3593{
3350 struct ixgbe_hw *hw = &adapter->hw; 3594 struct ixgbe_hw *hw = &adapter->hw;
3351 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3595 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3352 u32 txdctl;
3353 int i, j;
3354 3596
3355 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { 3597 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3356 if (hw->mac.type == ixgbe_mac_82598EB) 3598 if (hw->mac.type == ixgbe_mac_82598EB)
@@ -3366,25 +3608,18 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3366 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); 3608 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3367#endif 3609#endif
3368 3610
3369 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, 3611 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3370 DCB_TX_CONFIG); 3612 DCB_TX_CONFIG);
3371 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, 3613 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3372 DCB_RX_CONFIG); 3614 DCB_RX_CONFIG);
3373 3615
3374 /* reconfigure the hardware */
3375 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
3376
3377 for (i = 0; i < adapter->num_tx_queues; i++) {
3378 j = adapter->tx_ring[i]->reg_idx;
3379 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3380 /* PThresh workaround for Tx hang with DFP enabled. */
3381 txdctl |= 32;
3382 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3383 }
3384 /* Enable VLAN tag insert/strip */ 3616 /* Enable VLAN tag insert/strip */
3385 adapter->netdev->features |= NETIF_F_HW_VLAN_RX; 3617 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
3386 3618
3387 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3619 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3620
3621 /* reconfigure the hardware */
3622 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3388} 3623}
3389 3624
3390#endif 3625#endif
@@ -3516,8 +3751,9 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
3516 case ixgbe_mac_82598EB: 3751 case ixgbe_mac_82598EB:
3517 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3752 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3518 break; 3753 break;
3519 default:
3520 case ixgbe_mac_82599EB: 3754 case ixgbe_mac_82599EB:
3755 case ixgbe_mac_X540:
3756 default:
3521 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 3757 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3522 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 3758 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3523 break; 3759 break;
@@ -3561,13 +3797,24 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3561 else 3797 else
3562 ixgbe_configure_msi_and_legacy(adapter); 3798 ixgbe_configure_msi_and_legacy(adapter);
3563 3799
 3564 /* enable the optics */ 3800 /* enable the optics for both multi-speed fiber and 82599 SFP+ fiber */
3565 if (hw->phy.multispeed_fiber) 3801 if (hw->mac.ops.enable_tx_laser &&
3802 ((hw->phy.multispeed_fiber) ||
3803 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
3804 (hw->mac.type == ixgbe_mac_82599EB))))
3566 hw->mac.ops.enable_tx_laser(hw); 3805 hw->mac.ops.enable_tx_laser(hw);
3567 3806
3568 clear_bit(__IXGBE_DOWN, &adapter->state); 3807 clear_bit(__IXGBE_DOWN, &adapter->state);
3569 ixgbe_napi_enable_all(adapter); 3808 ixgbe_napi_enable_all(adapter);
3570 3809
3810 if (ixgbe_is_sfp(hw)) {
3811 ixgbe_sfp_link_config(adapter);
3812 } else {
3813 err = ixgbe_non_sfp_link_config(hw);
3814 if (err)
3815 e_err(probe, "link_config FAILED %d\n", err);
3816 }
3817
3571 /* clear any pending interrupts, may auto mask */ 3818 /* clear any pending interrupts, may auto mask */
3572 IXGBE_READ_REG(hw, IXGBE_EICR); 3819 IXGBE_READ_REG(hw, IXGBE_EICR);
3573 ixgbe_irq_enable(adapter, true, true); 3820 ixgbe_irq_enable(adapter, true, true);
@@ -3590,26 +3837,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3590 * If we're not hot-pluggable SFP+, we just need to configure link 3837 * If we're not hot-pluggable SFP+, we just need to configure link
3591 * and bring it up. 3838 * and bring it up.
3592 */ 3839 */
3593 if (hw->phy.type == ixgbe_phy_unknown) { 3840 if (hw->phy.type == ixgbe_phy_unknown)
3594 err = hw->phy.ops.identify(hw); 3841 schedule_work(&adapter->sfp_config_module_task);
3595 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3596 /*
3597 * Take the device down and schedule the sfp tasklet
3598 * which will unregister_netdev and log it.
3599 */
3600 ixgbe_down(adapter);
3601 schedule_work(&adapter->sfp_config_module_task);
3602 return err;
3603 }
3604 }
3605
3606 if (ixgbe_is_sfp(hw)) {
3607 ixgbe_sfp_link_config(adapter);
3608 } else {
3609 err = ixgbe_non_sfp_link_config(hw);
3610 if (err)
3611 e_err(probe, "link_config FAILED %d\n", err);
3612 }
3613 3842
3614 /* enable transmits */ 3843 /* enable transmits */
3615 netif_tx_start_all_queues(adapter->netdev); 3844 netif_tx_start_all_queues(adapter->netdev);
@@ -3687,15 +3916,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3687 3916
3688/** 3917/**
3689 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 3918 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
3690 * @adapter: board private structure
3691 * @rx_ring: ring to free buffers from 3919 * @rx_ring: ring to free buffers from
3692 **/ 3920 **/
3693static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, 3921static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
3694 struct ixgbe_ring *rx_ring)
3695{ 3922{
3696 struct pci_dev *pdev = adapter->pdev; 3923 struct device *dev = rx_ring->dev;
3697 unsigned long size; 3924 unsigned long size;
3698 unsigned int i; 3925 u16 i;
3699 3926
3700 /* ring already cleared, nothing to do */ 3927 /* ring already cleared, nothing to do */
3701 if (!rx_ring->rx_buffer_info) 3928 if (!rx_ring->rx_buffer_info)
@@ -3707,7 +3934,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3707 3934
3708 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 3935 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3709 if (rx_buffer_info->dma) { 3936 if (rx_buffer_info->dma) {
3710 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 3937 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
3711 rx_ring->rx_buf_len, 3938 rx_ring->rx_buf_len,
3712 DMA_FROM_DEVICE); 3939 DMA_FROM_DEVICE);
3713 rx_buffer_info->dma = 0; 3940 rx_buffer_info->dma = 0;
@@ -3718,7 +3945,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3718 do { 3945 do {
3719 struct sk_buff *this = skb; 3946 struct sk_buff *this = skb;
3720 if (IXGBE_RSC_CB(this)->delay_unmap) { 3947 if (IXGBE_RSC_CB(this)->delay_unmap) {
3721 dma_unmap_single(&pdev->dev, 3948 dma_unmap_single(dev,
3722 IXGBE_RSC_CB(this)->dma, 3949 IXGBE_RSC_CB(this)->dma,
3723 rx_ring->rx_buf_len, 3950 rx_ring->rx_buf_len,
3724 DMA_FROM_DEVICE); 3951 DMA_FROM_DEVICE);
@@ -3732,7 +3959,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3732 if (!rx_buffer_info->page) 3959 if (!rx_buffer_info->page)
3733 continue; 3960 continue;
3734 if (rx_buffer_info->page_dma) { 3961 if (rx_buffer_info->page_dma) {
3735 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, 3962 dma_unmap_page(dev, rx_buffer_info->page_dma,
3736 PAGE_SIZE / 2, DMA_FROM_DEVICE); 3963 PAGE_SIZE / 2, DMA_FROM_DEVICE);
3737 rx_buffer_info->page_dma = 0; 3964 rx_buffer_info->page_dma = 0;
3738 } 3965 }
@@ -3749,24 +3976,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3749 3976
3750 rx_ring->next_to_clean = 0; 3977 rx_ring->next_to_clean = 0;
3751 rx_ring->next_to_use = 0; 3978 rx_ring->next_to_use = 0;
3752
3753 if (rx_ring->head)
3754 writel(0, adapter->hw.hw_addr + rx_ring->head);
3755 if (rx_ring->tail)
3756 writel(0, adapter->hw.hw_addr + rx_ring->tail);
3757} 3979}
3758 3980
3759/** 3981/**
3760 * ixgbe_clean_tx_ring - Free Tx Buffers 3982 * ixgbe_clean_tx_ring - Free Tx Buffers
3761 * @adapter: board private structure
3762 * @tx_ring: ring to be cleaned 3983 * @tx_ring: ring to be cleaned
3763 **/ 3984 **/
3764static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, 3985static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
3765 struct ixgbe_ring *tx_ring)
3766{ 3986{
3767 struct ixgbe_tx_buffer *tx_buffer_info; 3987 struct ixgbe_tx_buffer *tx_buffer_info;
3768 unsigned long size; 3988 unsigned long size;
3769 unsigned int i; 3989 u16 i;
3770 3990
3771 /* ring already cleared, nothing to do */ 3991 /* ring already cleared, nothing to do */
3772 if (!tx_ring->tx_buffer_info) 3992 if (!tx_ring->tx_buffer_info)
@@ -3775,7 +3995,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3775 /* Free all the Tx ring sk_buffs */ 3995 /* Free all the Tx ring sk_buffs */
3776 for (i = 0; i < tx_ring->count; i++) { 3996 for (i = 0; i < tx_ring->count; i++) {
3777 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3997 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3778 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 3998 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
3779 } 3999 }
3780 4000
3781 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 4001 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3786,11 +4006,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3786 4006
3787 tx_ring->next_to_use = 0; 4007 tx_ring->next_to_use = 0;
3788 tx_ring->next_to_clean = 0; 4008 tx_ring->next_to_clean = 0;
3789
3790 if (tx_ring->head)
3791 writel(0, adapter->hw.hw_addr + tx_ring->head);
3792 if (tx_ring->tail)
3793 writel(0, adapter->hw.hw_addr + tx_ring->tail);
3794} 4009}
3795 4010
3796/** 4011/**
@@ -3802,7 +4017,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
3802 int i; 4017 int i;
3803 4018
3804 for (i = 0; i < adapter->num_rx_queues; i++) 4019 for (i = 0; i < adapter->num_rx_queues; i++)
3805 ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]); 4020 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
3806} 4021}
3807 4022
3808/** 4023/**
@@ -3814,7 +4029,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
3814 int i; 4029 int i;
3815 4030
3816 for (i = 0; i < adapter->num_tx_queues; i++) 4031 for (i = 0; i < adapter->num_tx_queues; i++)
3817 ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]); 4032 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
3818} 4033}
3819 4034
3820void ixgbe_down(struct ixgbe_adapter *adapter) 4035void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3823,7 +4038,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3823 struct ixgbe_hw *hw = &adapter->hw; 4038 struct ixgbe_hw *hw = &adapter->hw;
3824 u32 rxctrl; 4039 u32 rxctrl;
3825 u32 txdctl; 4040 u32 txdctl;
3826 int i, j; 4041 int i;
3827 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 4042 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3828 4043
3829 /* signal that we are down to the interrupt handler */ 4044 /* signal that we are down to the interrupt handler */
@@ -3881,26 +4096,36 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3881 4096
3882 /* disable transmits in the hardware now that interrupts are off */ 4097 /* disable transmits in the hardware now that interrupts are off */
3883 for (i = 0; i < adapter->num_tx_queues; i++) { 4098 for (i = 0; i < adapter->num_tx_queues; i++) {
3884 j = adapter->tx_ring[i]->reg_idx; 4099 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
3885 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 4100 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3886 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), 4101 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
3887 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 4102 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3888 } 4103 }
3889 /* Disable the Tx DMA engine on 82599 */ 4104 /* Disable the Tx DMA engine on 82599 */
3890 if (hw->mac.type == ixgbe_mac_82599EB) 4105 switch (hw->mac.type) {
4106 case ixgbe_mac_82599EB:
4107 case ixgbe_mac_X540:
3891 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, 4108 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3892 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 4109 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3893 ~IXGBE_DMATXCTL_TE)); 4110 ~IXGBE_DMATXCTL_TE));
3894 4111 break;
3895 /* power down the optics */ 4112 default:
3896 if (hw->phy.multispeed_fiber) 4113 break;
3897 hw->mac.ops.disable_tx_laser(hw); 4114 }
3898 4115
3899 /* clear n-tuple filters that are cached */ 4116 /* clear n-tuple filters that are cached */
3900 ethtool_ntuple_flush(netdev); 4117 ethtool_ntuple_flush(netdev);
3901 4118
3902 if (!pci_channel_offline(adapter->pdev)) 4119 if (!pci_channel_offline(adapter->pdev))
3903 ixgbe_reset(adapter); 4120 ixgbe_reset(adapter);
4121
4122 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
4123 if (hw->mac.ops.disable_tx_laser &&
4124 ((hw->phy.multispeed_fiber) ||
4125 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
4126 (hw->mac.type == ixgbe_mac_82599EB))))
4127 hw->mac.ops.disable_tx_laser(hw);
4128
3904 ixgbe_clean_all_tx_rings(adapter); 4129 ixgbe_clean_all_tx_rings(adapter);
3905 ixgbe_clean_all_rx_rings(adapter); 4130 ixgbe_clean_all_rx_rings(adapter);
3906 4131
@@ -3925,10 +4150,8 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
3925 int tx_clean_complete, work_done = 0; 4150 int tx_clean_complete, work_done = 0;
3926 4151
3927#ifdef CONFIG_IXGBE_DCA 4152#ifdef CONFIG_IXGBE_DCA
3928 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 4153 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3929 ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]); 4154 ixgbe_update_dca(q_vector);
3930 ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
3931 }
3932#endif 4155#endif
3933 4156
3934 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); 4157 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@ -3956,6 +4179,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
3956{ 4179{
3957 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4180 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3958 4181
4182 adapter->tx_timeout_count++;
4183
3959 /* Do the reset outside of interrupt context */ 4184 /* Do the reset outside of interrupt context */
3960 schedule_work(&adapter->reset_task); 4185 schedule_work(&adapter->reset_task);
3961} 4186}
@@ -3970,8 +4195,6 @@ static void ixgbe_reset_task(struct work_struct *work)
3970 test_bit(__IXGBE_RESETTING, &adapter->state)) 4195 test_bit(__IXGBE_RESETTING, &adapter->state))
3971 return; 4196 return;
3972 4197
3973 adapter->tx_timeout_count++;
3974
3975 ixgbe_dump(adapter); 4198 ixgbe_dump(adapter);
3976 netdev_err(adapter->netdev, "Reset adapter\n"); 4199 netdev_err(adapter->netdev, "Reset adapter\n");
3977 ixgbe_reinit_locked(adapter); 4200 ixgbe_reinit_locked(adapter);
@@ -4221,19 +4444,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4221static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) 4444static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4222{ 4445{
4223 int i; 4446 int i;
4224 bool ret = false;
4225 4447
4226 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4448 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
4227 for (i = 0; i < adapter->num_rx_queues; i++) 4449 return false;
4228 adapter->rx_ring[i]->reg_idx = i;
4229 for (i = 0; i < adapter->num_tx_queues; i++)
4230 adapter->tx_ring[i]->reg_idx = i;
4231 ret = true;
4232 } else {
4233 ret = false;
4234 }
4235 4450
4236 return ret; 4451 for (i = 0; i < adapter->num_rx_queues; i++)
4452 adapter->rx_ring[i]->reg_idx = i;
4453 for (i = 0; i < adapter->num_tx_queues; i++)
4454 adapter->tx_ring[i]->reg_idx = i;
4455
4456 return true;
4237} 4457}
4238 4458
4239#ifdef CONFIG_IXGBE_DCB 4459#ifdef CONFIG_IXGBE_DCB
@@ -4250,71 +4470,67 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4250 bool ret = false; 4470 bool ret = false;
4251 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 4471 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
4252 4472
4253 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4473 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4254 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 4474 return false;
4255 /* the number of queues is assumed to be symmetric */
4256 for (i = 0; i < dcb_i; i++) {
4257 adapter->rx_ring[i]->reg_idx = i << 3;
4258 adapter->tx_ring[i]->reg_idx = i << 2;
4259 }
4260 ret = true;
4261 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4262 if (dcb_i == 8) {
4263 /*
4264 * Tx TC0 starts at: descriptor queue 0
4265 * Tx TC1 starts at: descriptor queue 32
4266 * Tx TC2 starts at: descriptor queue 64
4267 * Tx TC3 starts at: descriptor queue 80
4268 * Tx TC4 starts at: descriptor queue 96
4269 * Tx TC5 starts at: descriptor queue 104
4270 * Tx TC6 starts at: descriptor queue 112
4271 * Tx TC7 starts at: descriptor queue 120
4272 *
4273 * Rx TC0-TC7 are offset by 16 queues each
4274 */
4275 for (i = 0; i < 3; i++) {
4276 adapter->tx_ring[i]->reg_idx = i << 5;
4277 adapter->rx_ring[i]->reg_idx = i << 4;
4278 }
4279 for ( ; i < 5; i++) {
4280 adapter->tx_ring[i]->reg_idx =
4281 ((i + 2) << 4);
4282 adapter->rx_ring[i]->reg_idx = i << 4;
4283 }
4284 for ( ; i < dcb_i; i++) {
4285 adapter->tx_ring[i]->reg_idx =
4286 ((i + 8) << 3);
4287 adapter->rx_ring[i]->reg_idx = i << 4;
4288 }
4289 4475
4290 ret = true; 4476 /* the number of queues is assumed to be symmetric */
4291 } else if (dcb_i == 4) { 4477 switch (adapter->hw.mac.type) {
4292 /* 4478 case ixgbe_mac_82598EB:
4293 * Tx TC0 starts at: descriptor queue 0 4479 for (i = 0; i < dcb_i; i++) {
4294 * Tx TC1 starts at: descriptor queue 64 4480 adapter->rx_ring[i]->reg_idx = i << 3;
4295 * Tx TC2 starts at: descriptor queue 96 4481 adapter->tx_ring[i]->reg_idx = i << 2;
4296 * Tx TC3 starts at: descriptor queue 112 4482 }
4297 * 4483 ret = true;
4298 * Rx TC0-TC3 are offset by 32 queues each 4484 break;
4299 */ 4485 case ixgbe_mac_82599EB:
4300 adapter->tx_ring[0]->reg_idx = 0; 4486 case ixgbe_mac_X540:
4301 adapter->tx_ring[1]->reg_idx = 64; 4487 if (dcb_i == 8) {
4302 adapter->tx_ring[2]->reg_idx = 96; 4488 /*
4303 adapter->tx_ring[3]->reg_idx = 112; 4489 * Tx TC0 starts at: descriptor queue 0
4304 for (i = 0 ; i < dcb_i; i++) 4490 * Tx TC1 starts at: descriptor queue 32
4305 adapter->rx_ring[i]->reg_idx = i << 5; 4491 * Tx TC2 starts at: descriptor queue 64
4306 4492 * Tx TC3 starts at: descriptor queue 80
4307 ret = true; 4493 * Tx TC4 starts at: descriptor queue 96
4308 } else { 4494 * Tx TC5 starts at: descriptor queue 104
4309 ret = false; 4495 * Tx TC6 starts at: descriptor queue 112
4496 * Tx TC7 starts at: descriptor queue 120
4497 *
4498 * Rx TC0-TC7 are offset by 16 queues each
4499 */
4500 for (i = 0; i < 3; i++) {
4501 adapter->tx_ring[i]->reg_idx = i << 5;
4502 adapter->rx_ring[i]->reg_idx = i << 4;
4310 } 4503 }
4311 } else { 4504 for ( ; i < 5; i++) {
4312 ret = false; 4505 adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
4506 adapter->rx_ring[i]->reg_idx = i << 4;
4507 }
4508 for ( ; i < dcb_i; i++) {
4509 adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
4510 adapter->rx_ring[i]->reg_idx = i << 4;
4511 }
4512 ret = true;
4513 } else if (dcb_i == 4) {
4514 /*
4515 * Tx TC0 starts at: descriptor queue 0
4516 * Tx TC1 starts at: descriptor queue 64
4517 * Tx TC2 starts at: descriptor queue 96
4518 * Tx TC3 starts at: descriptor queue 112
4519 *
4520 * Rx TC0-TC3 are offset by 32 queues each
4521 */
4522 adapter->tx_ring[0]->reg_idx = 0;
4523 adapter->tx_ring[1]->reg_idx = 64;
4524 adapter->tx_ring[2]->reg_idx = 96;
4525 adapter->tx_ring[3]->reg_idx = 112;
4526 for (i = 0 ; i < dcb_i; i++)
4527 adapter->rx_ring[i]->reg_idx = i << 5;
4528 ret = true;
4313 } 4529 }
4314 } else { 4530 break;
4315 ret = false; 4531 default:
4532 break;
4316 } 4533 }
4317
4318 return ret; 4534 return ret;
4319} 4535}
4320#endif 4536#endif
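The reworked ixgbe_cache_ring_dcb() above keeps the same 82599 8-TC layout it documents: Tx base queues 0, 32, 64, 80, 96, 104, 112, 120 and Rx TCs offset by 16 queues each. A standalone sketch that reproduces the reg_idx arithmetic so the resulting table can be checked at a glance:

#include <stdio.h>

int main(void)
{
        int tx_base[8], rx_base[8], i;

        /* 82599, 8 traffic classes: the shifts from the hunk above */
        for (i = 0; i < 3; i++)
                tx_base[i] = i << 5;         /* 0, 32, 64 */
        for (; i < 5; i++)
                tx_base[i] = (i + 2) << 4;   /* 80, 96 */
        for (; i < 8; i++)
                tx_base[i] = (i + 8) << 3;   /* 104, 112, 120 */
        for (i = 0; i < 8; i++)
                rx_base[i] = i << 4;         /* Rx TCs offset by 16 queues each */

        for (i = 0; i < 8; i++)
                printf("TC%d: tx starts at %3d, rx starts at %3d\n",
                       i, tx_base[i], rx_base[i]);
        return 0;
}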
@@ -4354,55 +4570,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
4354 */ 4570 */
4355static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) 4571static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4356{ 4572{
4357 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
4358 bool ret = false;
4359 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; 4573 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4574 int i;
4575 u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
4576
4577 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4578 return false;
4360 4579
4361 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4362#ifdef CONFIG_IXGBE_DCB 4580#ifdef CONFIG_IXGBE_DCB
4363 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4581 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4364 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 4582 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
4365 4583
4366 ixgbe_cache_ring_dcb(adapter); 4584 ixgbe_cache_ring_dcb(adapter);
4367 /* find out queues in TC for FCoE */ 4585 /* find out queues in TC for FCoE */
4368 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; 4586 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4369 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; 4587 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
4370 /* 4588 /*
4371 * In 82599, the number of Tx queues for each traffic 4589 * In 82599, the number of Tx queues for each traffic
4372 * class for both 8-TC and 4-TC modes are: 4590 * class for both 8-TC and 4-TC modes are:
4373 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 4591 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4374 * 8 TCs: 32 32 16 16 8 8 8 8 4592 * 8 TCs: 32 32 16 16 8 8 8 8
4375 * 4 TCs: 64 64 32 32 4593 * 4 TCs: 64 64 32 32
 4376 * We have max 8 queues for FCoE, where 8 is the 4594 * We have max 8 queues for FCoE, where 8 is the
4377 * FCoE redirection table size. If TC for FCoE is 4595 * FCoE redirection table size. If TC for FCoE is
4378 * less than or equal to TC3, we have enough queues 4596 * less than or equal to TC3, we have enough queues
4379 * to add max of 8 queues for FCoE, so we start FCoE 4597 * to add max of 8 queues for FCoE, so we start FCoE
4380 * tx descriptor from the next one, i.e., reg_idx + 1. 4598 * Tx queue from the next one, i.e., reg_idx + 1.
4381 * If TC for FCoE is above TC3, implying 8 TC mode, 4599 * If TC for FCoE is above TC3, implying 8 TC mode,
4382 * and we need 8 for FCoE, we have to take all queues 4600 * and we need 8 for FCoE, we have to take all queues
4383 * in that traffic class for FCoE. 4601 * in that traffic class for FCoE.
4384 */ 4602 */
4385 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) 4603 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4386 fcoe_tx_i--; 4604 fcoe_tx_i--;
4387 } 4605 }
4388#endif /* CONFIG_IXGBE_DCB */ 4606#endif /* CONFIG_IXGBE_DCB */
4389 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4607 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4390 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 4608 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4391 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 4609 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4392 ixgbe_cache_ring_fdir(adapter); 4610 ixgbe_cache_ring_fdir(adapter);
4393 else 4611 else
4394 ixgbe_cache_ring_rss(adapter); 4612 ixgbe_cache_ring_rss(adapter);
4395 4613
4396 fcoe_rx_i = f->mask; 4614 fcoe_rx_i = f->mask;
4397 fcoe_tx_i = f->mask; 4615 fcoe_tx_i = f->mask;
4398 }
4399 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4400 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4401 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4402 }
4403 ret = true;
4404 } 4616 }
4405 return ret; 4617 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4618 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4619 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4620 }
4621 return true;
4406} 4622}
4407 4623
4408#endif /* IXGBE_FCOE */ 4624#endif /* IXGBE_FCOE */
@@ -4471,65 +4687,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4471 **/ 4687 **/
4472static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 4688static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
4473{ 4689{
4474 int i; 4690 int rx = 0, tx = 0, nid = adapter->node;
4475 int orig_node = adapter->node;
4476 4691
4477 for (i = 0; i < adapter->num_tx_queues; i++) { 4692 if (nid < 0 || !node_online(nid))
4478 struct ixgbe_ring *ring = adapter->tx_ring[i]; 4693 nid = first_online_node;
4479 if (orig_node == -1) { 4694
4480 int cur_node = next_online_node(adapter->node); 4695 for (; tx < adapter->num_tx_queues; tx++) {
4481 if (cur_node == MAX_NUMNODES) 4696 struct ixgbe_ring *ring;
4482 cur_node = first_online_node; 4697
4483 adapter->node = cur_node; 4698 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4484 }
4485 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4486 adapter->node);
4487 if (!ring) 4699 if (!ring)
4488 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); 4700 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4489 if (!ring) 4701 if (!ring)
4490 goto err_tx_ring_allocation; 4702 goto err_allocation;
4491 ring->count = adapter->tx_ring_count; 4703 ring->count = adapter->tx_ring_count;
4492 ring->queue_index = i; 4704 ring->queue_index = tx;
4493 ring->numa_node = adapter->node; 4705 ring->numa_node = nid;
4706 ring->dev = &adapter->pdev->dev;
4707 ring->netdev = adapter->netdev;
4494 4708
4495 adapter->tx_ring[i] = ring; 4709 adapter->tx_ring[tx] = ring;
4496 } 4710 }
4497 4711
4498 /* Restore the adapter's original node */ 4712 for (; rx < adapter->num_rx_queues; rx++) {
4499 adapter->node = orig_node; 4713 struct ixgbe_ring *ring;
4500 4714
4501 for (i = 0; i < adapter->num_rx_queues; i++) { 4715 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4502 struct ixgbe_ring *ring = adapter->rx_ring[i];
4503 if (orig_node == -1) {
4504 int cur_node = next_online_node(adapter->node);
4505 if (cur_node == MAX_NUMNODES)
4506 cur_node = first_online_node;
4507 adapter->node = cur_node;
4508 }
4509 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4510 adapter->node);
4511 if (!ring) 4716 if (!ring)
4512 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); 4717 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4513 if (!ring) 4718 if (!ring)
4514 goto err_rx_ring_allocation; 4719 goto err_allocation;
4515 ring->count = adapter->rx_ring_count; 4720 ring->count = adapter->rx_ring_count;
4516 ring->queue_index = i; 4721 ring->queue_index = rx;
4517 ring->numa_node = adapter->node; 4722 ring->numa_node = nid;
4723 ring->dev = &adapter->pdev->dev;
4724 ring->netdev = adapter->netdev;
4518 4725
4519 adapter->rx_ring[i] = ring; 4726 adapter->rx_ring[rx] = ring;
4520 } 4727 }
4521 4728
4522 /* Restore the adapter's original node */
4523 adapter->node = orig_node;
4524
4525 ixgbe_cache_ring_register(adapter); 4729 ixgbe_cache_ring_register(adapter);
4526 4730
4527 return 0; 4731 return 0;
4528 4732
4529err_rx_ring_allocation: 4733err_allocation:
4530 for (i = 0; i < adapter->num_tx_queues; i++) 4734 while (tx)
4531 kfree(adapter->tx_ring[i]); 4735 kfree(adapter->tx_ring[--tx]);
4532err_tx_ring_allocation: 4736
4737 while (rx)
4738 kfree(adapter->rx_ring[--rx]);
4533 return -ENOMEM; 4739 return -ENOMEM;
4534} 4740}
4535 4741
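ixgbe_alloc_queues() above now picks one online NUMA node up front, tries kzalloc_node() for each ring, falls back to a plain allocation if the node allocation fails, and on error unwinds only the rings it managed to allocate. A userspace sketch of that allocate-with-fallback-and-unwind shape; alloc_on_node() is a stand-in that fails on alternate calls purely to exercise the fallback path:

#include <stdio.h>
#include <stdlib.h>

struct ring { int queue_index; int numa_node; };

/* Stand-in for kzalloc_node(): pretend the preferred node runs dry every other call. */
static struct ring *alloc_on_node(int nid, int attempt)
{
        (void)nid;
        return (attempt % 2) ? NULL : calloc(1, sizeof(struct ring));
}

int main(void)
{
        enum { NUM_TX = 4 };
        struct ring *tx_ring[NUM_TX] = { NULL };
        int nid = 0, tx;

        for (tx = 0; tx < NUM_TX; tx++) {
                struct ring *ring = alloc_on_node(nid, tx);
                if (!ring)
                        ring = calloc(1, sizeof(*ring));   /* node-agnostic fallback */
                if (!ring)
                        goto err_allocation;
                ring->queue_index = tx;
                ring->numa_node = nid;
                tx_ring[tx] = ring;
        }
        printf("allocated %d rings\n", tx);
        for (tx = 0; tx < NUM_TX; tx++)
                free(tx_ring[tx]);
        return 0;

err_allocation:
        while (tx)                           /* unwind only what was allocated */
                free(tx_ring[--tx]);
        return 1;
}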
@@ -4751,6 +4957,11 @@ err_set_interrupt:
4751 return err; 4957 return err;
4752} 4958}
4753 4959
4960static void ring_free_rcu(struct rcu_head *head)
4961{
4962 kfree(container_of(head, struct ixgbe_ring, rcu));
4963}
4964
4754/** 4965/**
4755 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings 4966 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
4756 * @adapter: board private structure to clear interrupt scheme on 4967 * @adapter: board private structure to clear interrupt scheme on
@@ -4767,7 +4978,12 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
4767 adapter->tx_ring[i] = NULL; 4978 adapter->tx_ring[i] = NULL;
4768 } 4979 }
4769 for (i = 0; i < adapter->num_rx_queues; i++) { 4980 for (i = 0; i < adapter->num_rx_queues; i++) {
4770 kfree(adapter->rx_ring[i]); 4981 struct ixgbe_ring *ring = adapter->rx_ring[i];
4982
4983 /* ixgbe_get_stats64() might access this ring, we must wait
4984 * a grace period before freeing it.
4985 */
4986 call_rcu(&ring->rcu, ring_free_rcu);
4771 adapter->rx_ring[i] = NULL; 4987 adapter->rx_ring[i] = NULL;
4772 } 4988 }
4773 4989
@@ -4847,6 +5063,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4847 int j; 5063 int j;
4848 struct tc_configuration *tc; 5064 struct tc_configuration *tc;
4849#endif 5065#endif
5066 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4850 5067
4851 /* PCI config space info */ 5068 /* PCI config space info */
4852 5069
@@ -4861,11 +5078,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4861 adapter->ring_feature[RING_F_RSS].indices = rss; 5078 adapter->ring_feature[RING_F_RSS].indices = rss;
4862 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 5079 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
4863 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; 5080 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
4864 if (hw->mac.type == ixgbe_mac_82598EB) { 5081 switch (hw->mac.type) {
5082 case ixgbe_mac_82598EB:
4865 if (hw->device_id == IXGBE_DEV_ID_82598AT) 5083 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4866 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 5084 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4867 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 5085 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
4868 } else if (hw->mac.type == ixgbe_mac_82599EB) { 5086 break;
5087 case ixgbe_mac_82599EB:
5088 case ixgbe_mac_X540:
4869 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 5089 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
4870 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 5090 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4871 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 5091 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -4894,6 +5114,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4894 adapter->fcoe.up = IXGBE_FCOE_DEFTC; 5114 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4895#endif 5115#endif
4896#endif /* IXGBE_FCOE */ 5116#endif /* IXGBE_FCOE */
5117 break;
5118 default:
5119 break;
4897 } 5120 }
4898 5121
4899#ifdef CONFIG_IXGBE_DCB 5122#ifdef CONFIG_IXGBE_DCB
@@ -4923,8 +5146,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4923#ifdef CONFIG_DCB 5146#ifdef CONFIG_DCB
4924 adapter->last_lfc_mode = hw->fc.current_mode; 5147 adapter->last_lfc_mode = hw->fc.current_mode;
4925#endif 5148#endif
4926 hw->fc.high_water = IXGBE_DEFAULT_FCRTH; 5149 hw->fc.high_water = FC_HIGH_WATER(max_frame);
4927 hw->fc.low_water = IXGBE_DEFAULT_FCRTL; 5150 hw->fc.low_water = FC_LOW_WATER(max_frame);
4928 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 5151 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4929 hw->fc.send_xon = true; 5152 hw->fc.send_xon = true;
4930 hw->fc.disable_fc_autoneg = false; 5153 hw->fc.disable_fc_autoneg = false;
@@ -4962,30 +5185,27 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4962 5185
4963/** 5186/**
4964 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) 5187 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
4965 * @adapter: board private structure
4966 * @tx_ring: tx descriptor ring (for a specific queue) to setup 5188 * @tx_ring: tx descriptor ring (for a specific queue) to setup
4967 * 5189 *
4968 * Return 0 on success, negative on failure 5190 * Return 0 on success, negative on failure
4969 **/ 5191 **/
4970int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 5192int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
4971 struct ixgbe_ring *tx_ring)
4972{ 5193{
4973 struct pci_dev *pdev = adapter->pdev; 5194 struct device *dev = tx_ring->dev;
4974 int size; 5195 int size;
4975 5196
4976 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 5197 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4977 tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node); 5198 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
4978 if (!tx_ring->tx_buffer_info) 5199 if (!tx_ring->tx_buffer_info)
4979 tx_ring->tx_buffer_info = vmalloc(size); 5200 tx_ring->tx_buffer_info = vzalloc(size);
4980 if (!tx_ring->tx_buffer_info) 5201 if (!tx_ring->tx_buffer_info)
4981 goto err; 5202 goto err;
4982 memset(tx_ring->tx_buffer_info, 0, size);
4983 5203
4984 /* round up to nearest 4K */ 5204 /* round up to nearest 4K */
4985 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 5205 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4986 tx_ring->size = ALIGN(tx_ring->size, 4096); 5206 tx_ring->size = ALIGN(tx_ring->size, 4096);
4987 5207
4988 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 5208 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4989 &tx_ring->dma, GFP_KERNEL); 5209 &tx_ring->dma, GFP_KERNEL);
4990 if (!tx_ring->desc) 5210 if (!tx_ring->desc)
4991 goto err; 5211 goto err;
@@ -4998,7 +5218,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4998err: 5218err:
4999 vfree(tx_ring->tx_buffer_info); 5219 vfree(tx_ring->tx_buffer_info);
5000 tx_ring->tx_buffer_info = NULL; 5220 tx_ring->tx_buffer_info = NULL;
5001 e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n"); 5221 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
5002 return -ENOMEM; 5222 return -ENOMEM;
5003} 5223}
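With the ring now carrying its own dev and numa_node fields, ixgbe_setup_tx_resources() needs no adapter argument: the software buffer array comes from vzalloc_node() (already zeroed, so the old memset() goes away) with a plain vzalloc() fallback, and the descriptor area is a 4 KiB-aligned coherent DMA buffer. A rough sketch of that two-step setup under the same assumptions, with illustrative demo_* types:

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/vmalloc.h>
    #include <linux/dma-mapping.h>

    struct demo_desc { u64 addr; u64 cmd_len; };   /* stand-in descriptor layout */

    struct demo_ring {
            struct device *dev;     /* &pdev->dev, cached when the ring is created */
            int numa_node;
            u16 count;
            void *buffer_info;      /* per-descriptor software state */
            unsigned int size;      /* descriptor area, rounded up to 4K */
            void *desc;
            dma_addr_t dma;
    };

    static int demo_setup_ring(struct demo_ring *ring, size_t buf_elem_size)
    {
            ring->buffer_info = vzalloc_node(buf_elem_size * ring->count,
                                             ring->numa_node);
            if (!ring->buffer_info)
                    ring->buffer_info = vzalloc(buf_elem_size * ring->count);
            if (!ring->buffer_info)
                    return -ENOMEM;

            ring->size = ALIGN(ring->count * sizeof(struct demo_desc), 4096);
            ring->desc = dma_alloc_coherent(ring->dev, ring->size,
                                            &ring->dma, GFP_KERNEL);
            if (!ring->desc) {
                    vfree(ring->buffer_info);
                    ring->buffer_info = NULL;
                    return -ENOMEM;
            }
            return 0;
    }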
5004 5224
@@ -5017,7 +5237,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5017 int i, err = 0; 5237 int i, err = 0;
5018 5238
5019 for (i = 0; i < adapter->num_tx_queues; i++) { 5239 for (i = 0; i < adapter->num_tx_queues; i++) {
5020 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); 5240 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
5021 if (!err) 5241 if (!err)
5022 continue; 5242 continue;
5023 e_err(probe, "Allocation for Tx Queue %u failed\n", i); 5243 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5029,48 +5249,40 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5029 5249
5030/** 5250/**
5031 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 5251 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
5032 * @adapter: board private structure
5033 * @rx_ring: rx descriptor ring (for a specific queue) to setup 5252 * @rx_ring: rx descriptor ring (for a specific queue) to setup
5034 * 5253 *
5035 * Returns 0 on success, negative on failure 5254 * Returns 0 on success, negative on failure
5036 **/ 5255 **/
5037int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 5256int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
5038 struct ixgbe_ring *rx_ring)
5039{ 5257{
5040 struct pci_dev *pdev = adapter->pdev; 5258 struct device *dev = rx_ring->dev;
5041 int size; 5259 int size;
5042 5260
5043 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 5261 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5044 rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node); 5262 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
5045 if (!rx_ring->rx_buffer_info) 5263 if (!rx_ring->rx_buffer_info)
5046 rx_ring->rx_buffer_info = vmalloc(size); 5264 rx_ring->rx_buffer_info = vzalloc(size);
5047 if (!rx_ring->rx_buffer_info) { 5265 if (!rx_ring->rx_buffer_info)
5048 e_err(probe, "vmalloc allocation failed for the Rx " 5266 goto err;
5049 "descriptor ring\n");
5050 goto alloc_failed;
5051 }
5052 memset(rx_ring->rx_buffer_info, 0, size);
5053 5267
5054 /* Round up to nearest 4K */ 5268 /* Round up to nearest 4K */
5055 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 5269 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5056 rx_ring->size = ALIGN(rx_ring->size, 4096); 5270 rx_ring->size = ALIGN(rx_ring->size, 4096);
5057 5271
5058 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 5272 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
5059 &rx_ring->dma, GFP_KERNEL); 5273 &rx_ring->dma, GFP_KERNEL);
5060 5274
5061 if (!rx_ring->desc) { 5275 if (!rx_ring->desc)
5062 e_err(probe, "Memory allocation failed for the Rx " 5276 goto err;
5063 "descriptor ring\n");
5064 vfree(rx_ring->rx_buffer_info);
5065 goto alloc_failed;
5066 }
5067 5277
5068 rx_ring->next_to_clean = 0; 5278 rx_ring->next_to_clean = 0;
5069 rx_ring->next_to_use = 0; 5279 rx_ring->next_to_use = 0;
5070 5280
5071 return 0; 5281 return 0;
5072 5282err:
5073alloc_failed: 5283 vfree(rx_ring->rx_buffer_info);
5284 rx_ring->rx_buffer_info = NULL;
5285 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
5074 return -ENOMEM; 5286 return -ENOMEM;
5075} 5287}
5076 5288
@@ -5084,13 +5296,12 @@ alloc_failed:
5084 * 5296 *
5085 * Return 0 on success, negative on failure 5297 * Return 0 on success, negative on failure
5086 **/ 5298 **/
5087
5088static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) 5299static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5089{ 5300{
5090 int i, err = 0; 5301 int i, err = 0;
5091 5302
5092 for (i = 0; i < adapter->num_rx_queues; i++) { 5303 for (i = 0; i < adapter->num_rx_queues; i++) {
5093 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); 5304 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
5094 if (!err) 5305 if (!err)
5095 continue; 5306 continue;
5096 e_err(probe, "Allocation for Rx Queue %u failed\n", i); 5307 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5102,23 +5313,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5102 5313
5103/** 5314/**
5104 * ixgbe_free_tx_resources - Free Tx Resources per Queue 5315 * ixgbe_free_tx_resources - Free Tx Resources per Queue
5105 * @adapter: board private structure
5106 * @tx_ring: Tx descriptor ring for a specific queue 5316 * @tx_ring: Tx descriptor ring for a specific queue
5107 * 5317 *
5108 * Free all transmit software resources 5318 * Free all transmit software resources
5109 **/ 5319 **/
5110void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 5320void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5111 struct ixgbe_ring *tx_ring)
5112{ 5321{
5113 struct pci_dev *pdev = adapter->pdev; 5322 ixgbe_clean_tx_ring(tx_ring);
5114
5115 ixgbe_clean_tx_ring(adapter, tx_ring);
5116 5323
5117 vfree(tx_ring->tx_buffer_info); 5324 vfree(tx_ring->tx_buffer_info);
5118 tx_ring->tx_buffer_info = NULL; 5325 tx_ring->tx_buffer_info = NULL;
5119 5326
5120 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 5327 /* if not set, then don't free */
5121 tx_ring->dma); 5328 if (!tx_ring->desc)
5329 return;
5330
5331 dma_free_coherent(tx_ring->dev, tx_ring->size,
5332 tx_ring->desc, tx_ring->dma);
5122 5333
5123 tx_ring->desc = NULL; 5334 tx_ring->desc = NULL;
5124} 5335}
@@ -5135,28 +5346,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5135 5346
5136 for (i = 0; i < adapter->num_tx_queues; i++) 5347 for (i = 0; i < adapter->num_tx_queues; i++)
5137 if (adapter->tx_ring[i]->desc) 5348 if (adapter->tx_ring[i]->desc)
5138 ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]); 5349 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5139} 5350}
5140 5351
5141/** 5352/**
5142 * ixgbe_free_rx_resources - Free Rx Resources 5353 * ixgbe_free_rx_resources - Free Rx Resources
5143 * @adapter: board private structure
5144 * @rx_ring: ring to clean the resources from 5354 * @rx_ring: ring to clean the resources from
5145 * 5355 *
5146 * Free all receive software resources 5356 * Free all receive software resources
5147 **/ 5357 **/
5148void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 5358void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5149 struct ixgbe_ring *rx_ring)
5150{ 5359{
5151 struct pci_dev *pdev = adapter->pdev; 5360 ixgbe_clean_rx_ring(rx_ring);
5152
5153 ixgbe_clean_rx_ring(adapter, rx_ring);
5154 5361
5155 vfree(rx_ring->rx_buffer_info); 5362 vfree(rx_ring->rx_buffer_info);
5156 rx_ring->rx_buffer_info = NULL; 5363 rx_ring->rx_buffer_info = NULL;
5157 5364
5158 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 5365 /* if not set, then don't free */
5159 rx_ring->dma); 5366 if (!rx_ring->desc)
5367 return;
5368
5369 dma_free_coherent(rx_ring->dev, rx_ring->size,
5370 rx_ring->desc, rx_ring->dma);
5160 5371
5161 rx_ring->desc = NULL; 5372 rx_ring->desc = NULL;
5162} 5373}
@@ -5173,7 +5384,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5173 5384
5174 for (i = 0; i < adapter->num_rx_queues; i++) 5385 for (i = 0; i < adapter->num_rx_queues; i++)
5175 if (adapter->rx_ring[i]->desc) 5386 if (adapter->rx_ring[i]->desc)
5176 ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]); 5387 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5177} 5388}
5178 5389
5179/** 5390/**
@@ -5186,6 +5397,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5186static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) 5397static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5187{ 5398{
5188 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5399 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5400 struct ixgbe_hw *hw = &adapter->hw;
5189 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5401 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5190 5402
5191 /* MTU < 68 is an error and causes problems on some kernels */ 5403 /* MTU < 68 is an error and causes problems on some kernels */
@@ -5196,6 +5408,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5196 /* must set new MTU before calling down or up */ 5408 /* must set new MTU before calling down or up */
5197 netdev->mtu = new_mtu; 5409 netdev->mtu = new_mtu;
5198 5410
5411 hw->fc.high_water = FC_HIGH_WATER(max_frame);
5412 hw->fc.low_water = FC_LOW_WATER(max_frame);
5413
5199 if (netif_running(netdev)) 5414 if (netif_running(netdev))
5200 ixgbe_reinit_locked(adapter); 5415 ixgbe_reinit_locked(adapter);
5201 5416
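ixgbe_sw_init() and ixgbe_change_mtu() above now derive the pause-frame thresholds from the current maximum frame size through the driver's FC_HIGH_WATER() and FC_LOW_WATER() macros, whose definitions are not part of this diff. Purely as a hedged illustration of the idea, thresholds of this kind usually sit a frame or two below the top of the Rx packet buffer, for example:

    #include <linux/types.h>
    #include <linux/if_ether.h>

    /* Illustrative stand-ins; not the driver's FC_HIGH_WATER()/FC_LOW_WATER(). */
    #define DEMO_RX_PB_BYTES        (512 * 1024)    /* assumed Rx packet-buffer size */
    #define DEMO_FC_HIGH(frame)     (DEMO_RX_PB_BYTES - 2 * (frame))
    #define DEMO_FC_LOW(frame)      (DEMO_FC_HIGH(frame) - (frame))

    static void demo_update_fc_thresholds(int mtu, u32 *high, u32 *low)
    {
            int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;   /* same formula as the diff */

            *high = DEMO_FC_HIGH(max_frame);
            *low  = DEMO_FC_LOW(max_frame);
    }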
@@ -5291,8 +5506,8 @@ static int ixgbe_close(struct net_device *netdev)
5291#ifdef CONFIG_PM 5506#ifdef CONFIG_PM
5292static int ixgbe_resume(struct pci_dev *pdev) 5507static int ixgbe_resume(struct pci_dev *pdev)
5293{ 5508{
5294 struct net_device *netdev = pci_get_drvdata(pdev); 5509 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5295 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5510 struct net_device *netdev = adapter->netdev;
5296 u32 err; 5511 u32 err;
5297 5512
5298 pci_set_power_state(pdev, PCI_D0); 5513 pci_set_power_state(pdev, PCI_D0);
@@ -5323,7 +5538,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
5323 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 5538 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5324 5539
5325 if (netif_running(netdev)) { 5540 if (netif_running(netdev)) {
5326 err = ixgbe_open(adapter->netdev); 5541 err = ixgbe_open(netdev);
5327 if (err) 5542 if (err)
5328 return err; 5543 return err;
5329 } 5544 }
@@ -5336,8 +5551,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
5336 5551
5337static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) 5552static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5338{ 5553{
5339 struct net_device *netdev = pci_get_drvdata(pdev); 5554 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5340 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5555 struct net_device *netdev = adapter->netdev;
5341 struct ixgbe_hw *hw = &adapter->hw; 5556 struct ixgbe_hw *hw = &adapter->hw;
5342 u32 ctrl, fctrl; 5557 u32 ctrl, fctrl;
5343 u32 wufc = adapter->wol; 5558 u32 wufc = adapter->wol;
@@ -5354,6 +5569,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5354 ixgbe_free_all_rx_resources(adapter); 5569 ixgbe_free_all_rx_resources(adapter);
5355 } 5570 }
5356 5571
5572 ixgbe_clear_interrupt_scheme(adapter);
5573
5357#ifdef CONFIG_PM 5574#ifdef CONFIG_PM
5358 retval = pci_save_state(pdev); 5575 retval = pci_save_state(pdev);
5359 if (retval) 5576 if (retval)
@@ -5380,15 +5597,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5380 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 5597 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5381 } 5598 }
5382 5599
5383 if (wufc && hw->mac.type == ixgbe_mac_82599EB) 5600 switch (hw->mac.type) {
5384 pci_wake_from_d3(pdev, true); 5601 case ixgbe_mac_82598EB:
5385 else
5386 pci_wake_from_d3(pdev, false); 5602 pci_wake_from_d3(pdev, false);
5603 break;
5604 case ixgbe_mac_82599EB:
5605 case ixgbe_mac_X540:
5606 pci_wake_from_d3(pdev, !!wufc);
5607 break;
5608 default:
5609 break;
5610 }
5387 5611
5388 *enable_wake = !!wufc; 5612 *enable_wake = !!wufc;
5389 5613
5390 ixgbe_clear_interrupt_scheme(adapter);
5391
5392 ixgbe_release_hw_control(adapter); 5614 ixgbe_release_hw_control(adapter);
5393 5615
5394 pci_disable_device(pdev); 5616 pci_disable_device(pdev);
@@ -5437,10 +5659,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5437{ 5659{
5438 struct net_device *netdev = adapter->netdev; 5660 struct net_device *netdev = adapter->netdev;
5439 struct ixgbe_hw *hw = &adapter->hw; 5661 struct ixgbe_hw *hw = &adapter->hw;
5662 struct ixgbe_hw_stats *hwstats = &adapter->stats;
5440 u64 total_mpc = 0; 5663 u64 total_mpc = 0;
5441 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 5664 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5442 u64 non_eop_descs = 0, restart_queue = 0; 5665 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5443 struct ixgbe_hw_stats *hwstats = &adapter->stats; 5666 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
5667 u64 bytes = 0, packets = 0;
5444 5668
5445 if (test_bit(__IXGBE_DOWN, &adapter->state) || 5669 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5446 test_bit(__IXGBE_RESETTING, &adapter->state)) 5670 test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5453,21 +5677,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5453 adapter->hw_rx_no_dma_resources += 5677 adapter->hw_rx_no_dma_resources +=
5454 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 5678 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5455 for (i = 0; i < adapter->num_rx_queues; i++) { 5679 for (i = 0; i < adapter->num_rx_queues; i++) {
5456 rsc_count += adapter->rx_ring[i]->rsc_count; 5680 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5457 rsc_flush += adapter->rx_ring[i]->rsc_flush; 5681 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
5458 } 5682 }
5459 adapter->rsc_total_count = rsc_count; 5683 adapter->rsc_total_count = rsc_count;
5460 adapter->rsc_total_flush = rsc_flush; 5684 adapter->rsc_total_flush = rsc_flush;
5461 } 5685 }
5462 5686
5687 for (i = 0; i < adapter->num_rx_queues; i++) {
5688 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5689 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5690 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5691 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
5692 bytes += rx_ring->stats.bytes;
5693 packets += rx_ring->stats.packets;
5694 }
5695 adapter->non_eop_descs = non_eop_descs;
5696 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5697 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
5698 netdev->stats.rx_bytes = bytes;
5699 netdev->stats.rx_packets = packets;
5700
5701 bytes = 0;
5702 packets = 0;
5463 /* gather some stats to the adapter struct that are per queue */ 5703 /* gather some stats to the adapter struct that are per queue */
5464 for (i = 0; i < adapter->num_tx_queues; i++) 5704 for (i = 0; i < adapter->num_tx_queues; i++) {
5465 restart_queue += adapter->tx_ring[i]->restart_queue; 5705 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5706 restart_queue += tx_ring->tx_stats.restart_queue;
5707 tx_busy += tx_ring->tx_stats.tx_busy;
5708 bytes += tx_ring->stats.bytes;
5709 packets += tx_ring->stats.packets;
5710 }
5466 adapter->restart_queue = restart_queue; 5711 adapter->restart_queue = restart_queue;
5467 5712 adapter->tx_busy = tx_busy;
5468 for (i = 0; i < adapter->num_rx_queues; i++) 5713 netdev->stats.tx_bytes = bytes;
5469 non_eop_descs += adapter->rx_ring[i]->non_eop_descs; 5714 netdev->stats.tx_packets = packets;
5470 adapter->non_eop_descs = non_eop_descs;
5471 5715
5472 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 5716 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
5473 for (i = 0; i < 8; i++) { 5717 for (i = 0; i < 8; i++) {
@@ -5482,17 +5726,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5482 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 5726 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5483 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 5727 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5484 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 5728 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5485 if (hw->mac.type == ixgbe_mac_82599EB) { 5729 switch (hw->mac.type) {
5486 hwstats->pxonrxc[i] += 5730 case ixgbe_mac_82598EB:
5487 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5488 hwstats->pxoffrxc[i] +=
5489 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
5490 hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5491 } else {
5492 hwstats->pxonrxc[i] += 5731 hwstats->pxonrxc[i] +=
5493 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 5732 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5494 hwstats->pxoffrxc[i] += 5733 break;
5495 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 5734 case ixgbe_mac_82599EB:
5735 case ixgbe_mac_X540:
5736 hwstats->pxonrxc[i] +=
5737 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5738 break;
5739 default:
5740 break;
5496 } 5741 }
5497 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 5742 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5498 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 5743 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5501,21 +5746,25 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5501 /* work around hardware counting issue */ 5746 /* work around hardware counting issue */
5502 hwstats->gprc -= missed_rx; 5747 hwstats->gprc -= missed_rx;
5503 5748
5749 ixgbe_update_xoff_received(adapter);
5750
5504 /* 82598 hardware only has a 32 bit counter in the high register */ 5751 /* 82598 hardware only has a 32 bit counter in the high register */
5505 if (hw->mac.type == ixgbe_mac_82599EB) { 5752 switch (hw->mac.type) {
5506 u64 tmp; 5753 case ixgbe_mac_82598EB:
5754 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5755 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5756 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5757 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5758 break;
5759 case ixgbe_mac_82599EB:
5760 case ixgbe_mac_X540:
5507 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 5761 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5508 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; 5762 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
5509 /* 4 high bits of GORC */
5510 hwstats->gorc += (tmp << 32);
5511 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 5763 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
5512 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; 5764 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
5513 /* 4 high bits of GOTC */
5514 hwstats->gotc += (tmp << 32);
5515 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 5765 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
5516 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 5766 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5517 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 5767 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5518 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
5519 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 5768 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5520 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 5769 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
5521#ifdef IXGBE_FCOE 5770#ifdef IXGBE_FCOE
@@ -5526,12 +5775,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5526 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 5775 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5527 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 5776 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
5528#endif /* IXGBE_FCOE */ 5777#endif /* IXGBE_FCOE */
5529 } else { 5778 break;
5530 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 5779 default:
5531 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 5780 break;
5532 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5533 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5534 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5535 } 5781 }
5536 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 5782 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5537 hwstats->bprc += bprc; 5783 hwstats->bprc += bprc;
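ixgbe_update_stats() above now makes one pass over the Rx rings and one over the Tx rings, summing the per-ring software counters (non-EOP descriptors, allocation failures, restart/busy events, bytes, packets) into fresh local totals and then overwriting the adapter and netdev fields, rather than accumulating into them across calls. A stripped-down sketch of that fold, with illustrative demo_* structures:

    #include <linux/types.h>

    struct demo_ring_stats { u64 packets; u64 bytes; };

    struct demo_ring {
            struct demo_ring_stats stats;
            u64 restart_queue;              /* ring-local software counter */
    };

    /* Fold per-ring counters into device-wide totals in a single pass. */
    static void demo_fold_tx_stats(struct demo_ring **rings, int n,
                                   u64 *tot_packets, u64 *tot_bytes,
                                   u64 *tot_restarts)
    {
            u64 packets = 0, bytes = 0, restarts = 0;
            int i;

            for (i = 0; i < n; i++) {
                    packets  += rings[i]->stats.packets;
                    bytes    += rings[i]->stats.bytes;
                    restarts += rings[i]->restart_queue;
            }
            /* overwrite the totals rather than accumulating across calls */
            *tot_packets = packets;
            *tot_bytes = bytes;
            *tot_restarts = restarts;
    }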
@@ -5704,8 +5950,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
5704 5950
5705 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 5951 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5706 for (i = 0; i < adapter->num_tx_queues; i++) 5952 for (i = 0; i < adapter->num_tx_queues; i++)
5707 set_bit(__IXGBE_FDIR_INIT_DONE, 5953 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5708 &(adapter->tx_ring[i]->reinit_state)); 5954 &(adapter->tx_ring[i]->state));
5709 } else { 5955 } else {
5710 e_err(probe, "failed to finish FDIR re-initialization, " 5956 e_err(probe, "failed to finish FDIR re-initialization, "
5711 "ignored adding FDIR ATR filters\n"); 5957 "ignored adding FDIR ATR filters\n");
@@ -5767,17 +6013,27 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5767 if (!netif_carrier_ok(netdev)) { 6013 if (!netif_carrier_ok(netdev)) {
5768 bool flow_rx, flow_tx; 6014 bool flow_rx, flow_tx;
5769 6015
5770 if (hw->mac.type == ixgbe_mac_82599EB) { 6016 switch (hw->mac.type) {
5771 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 6017 case ixgbe_mac_82598EB: {
5772 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5773 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5774 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5775 } else {
5776 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 6018 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5777 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 6019 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5778 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); 6020 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5779 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); 6021 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5780 } 6022 }
6023 break;
6024 case ixgbe_mac_82599EB:
6025 case ixgbe_mac_X540: {
6026 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6027 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6028 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6029 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6030 }
6031 break;
6032 default:
6033 flow_tx = false;
6034 flow_rx = false;
6035 break;
6036 }
5781 6037
5782 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", 6038 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5783 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 6039 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5791,7 +6047,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5791 netif_carrier_on(netdev); 6047 netif_carrier_on(netdev);
5792 } else { 6048 } else {
5793 /* Force detection of hung controller */ 6049 /* Force detection of hung controller */
5794 adapter->detect_tx_hung = true; 6050 for (i = 0; i < adapter->num_tx_queues; i++) {
6051 tx_ring = adapter->tx_ring[i];
6052 set_check_for_tx_hang(tx_ring);
6053 }
5795 } 6054 }
5796 } else { 6055 } else {
5797 adapter->link_up = false; 6056 adapter->link_up = false;
@@ -6003,15 +6262,17 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
6003static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 6262static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6004 struct ixgbe_ring *tx_ring, 6263 struct ixgbe_ring *tx_ring,
6005 struct sk_buff *skb, u32 tx_flags, 6264 struct sk_buff *skb, u32 tx_flags,
6006 unsigned int first) 6265 unsigned int first, const u8 hdr_len)
6007{ 6266{
6008 struct pci_dev *pdev = adapter->pdev; 6267 struct device *dev = tx_ring->dev;
6009 struct ixgbe_tx_buffer *tx_buffer_info; 6268 struct ixgbe_tx_buffer *tx_buffer_info;
6010 unsigned int len; 6269 unsigned int len;
6011 unsigned int total = skb->len; 6270 unsigned int total = skb->len;
6012 unsigned int offset = 0, size, count = 0, i; 6271 unsigned int offset = 0, size, count = 0, i;
6013 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 6272 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
6014 unsigned int f; 6273 unsigned int f;
6274 unsigned int bytecount = skb->len;
6275 u16 gso_segs = 1;
6015 6276
6016 i = tx_ring->next_to_use; 6277 i = tx_ring->next_to_use;
6017 6278
@@ -6026,10 +6287,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6026 6287
6027 tx_buffer_info->length = size; 6288 tx_buffer_info->length = size;
6028 tx_buffer_info->mapped_as_page = false; 6289 tx_buffer_info->mapped_as_page = false;
6029 tx_buffer_info->dma = dma_map_single(&pdev->dev, 6290 tx_buffer_info->dma = dma_map_single(dev,
6030 skb->data + offset, 6291 skb->data + offset,
6031 size, DMA_TO_DEVICE); 6292 size, DMA_TO_DEVICE);
6032 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 6293 if (dma_mapping_error(dev, tx_buffer_info->dma))
6033 goto dma_error; 6294 goto dma_error;
6034 tx_buffer_info->time_stamp = jiffies; 6295 tx_buffer_info->time_stamp = jiffies;
6035 tx_buffer_info->next_to_watch = i; 6296 tx_buffer_info->next_to_watch = i;
@@ -6062,12 +6323,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6062 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 6323 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
6063 6324
6064 tx_buffer_info->length = size; 6325 tx_buffer_info->length = size;
6065 tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, 6326 tx_buffer_info->dma = dma_map_page(dev,
6066 frag->page, 6327 frag->page,
6067 offset, size, 6328 offset, size,
6068 DMA_TO_DEVICE); 6329 DMA_TO_DEVICE);
6069 tx_buffer_info->mapped_as_page = true; 6330 tx_buffer_info->mapped_as_page = true;
6070 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 6331 if (dma_mapping_error(dev, tx_buffer_info->dma))
6071 goto dma_error; 6332 goto dma_error;
6072 tx_buffer_info->time_stamp = jiffies; 6333 tx_buffer_info->time_stamp = jiffies;
6073 tx_buffer_info->next_to_watch = i; 6334 tx_buffer_info->next_to_watch = i;
@@ -6081,6 +6342,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6081 break; 6342 break;
6082 } 6343 }
6083 6344
6345 if (tx_flags & IXGBE_TX_FLAGS_TSO)
6346 gso_segs = skb_shinfo(skb)->gso_segs;
6347#ifdef IXGBE_FCOE
6348 /* adjust for FCoE Sequence Offload */
6349 else if (tx_flags & IXGBE_TX_FLAGS_FSO)
6350 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
6351 skb_shinfo(skb)->gso_size);
6352#endif /* IXGBE_FCOE */
6353 bytecount += (gso_segs - 1) * hdr_len;
6354
6355 /* multiply data chunks by size of headers */
6356 tx_ring->tx_buffer_info[i].bytecount = bytecount;
6357 tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
6084 tx_ring->tx_buffer_info[i].skb = skb; 6358 tx_ring->tx_buffer_info[i].skb = skb;
6085 tx_ring->tx_buffer_info[first].next_to_watch = i; 6359 tx_ring->tx_buffer_info[first].next_to_watch = i;
6086 6360
@@ -6102,14 +6376,13 @@ dma_error:
6102 i += tx_ring->count; 6376 i += tx_ring->count;
6103 i--; 6377 i--;
6104 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 6378 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6105 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 6379 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
6106 } 6380 }
6107 6381
6108 return 0; 6382 return 0;
6109} 6383}
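ixgbe_tx_map() now stores, on the last buffer of the frame, how many segments the hardware will put on the wire and a byte count that charges one replicated header per extra segment, so the Tx clean-up path can report accurate per-ring packet and byte totals. A hedged sketch of just that arithmetic; the helper name and parameters are illustrative:

    #include <linux/skbuff.h>

    /* Estimate wire-level segments and bytes for one outgoing frame. */
    static void demo_tx_accounting(const struct sk_buff *skb, u8 hdr_len,
                                   bool tso, u16 *gso_segs,
                                   unsigned int *bytecount)
    {
            u16 segs = 1;

            if (tso)
                    segs = skb_shinfo(skb)->gso_segs;  /* filled in by the TSO setup */

            /* every segment after the first repeats hdr_len bytes of headers */
            *gso_segs = segs;
            *bytecount = skb->len + (segs - 1) * hdr_len;
    }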
6110 6384
6111static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 6385static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
6112 struct ixgbe_ring *tx_ring,
6113 int tx_flags, int count, u32 paylen, u8 hdr_len) 6386 int tx_flags, int count, u32 paylen, u8 hdr_len)
6114{ 6387{
6115 union ixgbe_adv_tx_desc *tx_desc = NULL; 6388 union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6174,60 +6447,46 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6174 wmb(); 6447 wmb();
6175 6448
6176 tx_ring->next_to_use = i; 6449 tx_ring->next_to_use = i;
6177 writel(i, adapter->hw.hw_addr + tx_ring->tail); 6450 writel(i, tx_ring->tail);
6178} 6451}
6179 6452
6180static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6453static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6181 int queue, u32 tx_flags, __be16 protocol) 6454 u8 queue, u32 tx_flags, __be16 protocol)
6182{ 6455{
6183 struct ixgbe_atr_input atr_input; 6456 struct ixgbe_atr_input atr_input;
6184 struct tcphdr *th;
6185 struct iphdr *iph = ip_hdr(skb); 6457 struct iphdr *iph = ip_hdr(skb);
6186 struct ethhdr *eth = (struct ethhdr *)skb->data; 6458 struct ethhdr *eth = (struct ethhdr *)skb->data;
6187 u16 vlan_id, src_port, dst_port, flex_bytes; 6459 struct tcphdr *th;
6188 u32 src_ipv4_addr, dst_ipv4_addr; 6460 u16 vlan_id;
6189 u8 l4type = 0;
6190 6461
6191 /* Right now, we support IPv4 only */ 6462 /* Right now, we support IPv4 w/ TCP only */
6192 if (protocol != htons(ETH_P_IP)) 6463 if (protocol != htons(ETH_P_IP) ||
6464 iph->protocol != IPPROTO_TCP)
6193 return; 6465 return;
6194 /* check if we're UDP or TCP */
6195 if (iph->protocol == IPPROTO_TCP) {
6196 th = tcp_hdr(skb);
6197 src_port = th->source;
6198 dst_port = th->dest;
6199 l4type |= IXGBE_ATR_L4TYPE_TCP;
6200 /* l4type IPv4 type is 0, no need to assign */
6201 } else {
6202 /* Unsupported L4 header, just bail here */
6203 return;
6204 }
6205 6466
6206 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); 6467 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
6207 6468
6208 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> 6469 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
6209 IXGBE_TX_FLAGS_VLAN_SHIFT; 6470 IXGBE_TX_FLAGS_VLAN_SHIFT;
6210 src_ipv4_addr = iph->saddr; 6471
6211 dst_ipv4_addr = iph->daddr; 6472 th = tcp_hdr(skb);
6212 flex_bytes = eth->h_proto;
6213 6473
6214 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); 6474 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
6215 ixgbe_atr_set_src_port_82599(&atr_input, dst_port); 6475 ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
6216 ixgbe_atr_set_dst_port_82599(&atr_input, src_port); 6476 ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
6217 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes); 6477 ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
6218 ixgbe_atr_set_l4type_82599(&atr_input, l4type); 6478 ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
6219 /* src and dst are inverted, think how the receiver sees them */ 6479 /* src and dst are inverted, think how the receiver sees them */
6220 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr); 6480 ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
6221 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr); 6481 ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
6222 6482
6223 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 6483 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
6224 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); 6484 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
6225} 6485}
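The slimmed-down ixgbe_atr() above programs a Flow Director signature filter only for IPv4/TCP, and it deliberately swaps source and destination fields because the filter must match the receive half of the flow this transmit belongs to. A sketch of extracting that swapped 4-tuple from an outgoing skb; the demo_tuple type is illustrative, the real code feeds the ixgbe_atr_set_*_82599() helpers directly:

    #include <linux/if_ether.h>
    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/tcp.h>
    #include <linux/skbuff.h>

    struct demo_tuple {
            __be32 src_ip, dst_ip;
            __be16 src_port, dst_port;
    };

    /* Build the receive-direction 4-tuple for an skb we are about to transmit. */
    static bool demo_rx_tuple_from_tx_skb(const struct sk_buff *skb,
                                          __be16 protocol, struct demo_tuple *t)
    {
            const struct iphdr *iph;
            const struct tcphdr *th;

            if (protocol != htons(ETH_P_IP))
                    return false;
            iph = ip_hdr(skb);
            if (iph->protocol != IPPROTO_TCP)
                    return false;
            th = tcp_hdr(skb);

            /* swapped on purpose: what we send as source comes back as destination */
            t->src_ip = iph->daddr;
            t->dst_ip = iph->saddr;
            t->src_port = th->dest;
            t->dst_port = th->source;
            return true;
    }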
6226 6486
6227static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 6487static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
6228 struct ixgbe_ring *tx_ring, int size)
6229{ 6488{
6230 netif_stop_subqueue(netdev, tx_ring->queue_index); 6489 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
6231 /* Herbert's original patch had: 6490 /* Herbert's original patch had:
6232 * smp_mb__after_netif_stop_queue(); 6491 * smp_mb__after_netif_stop_queue();
6233 * but since that doesn't exist yet, just open code it. */ 6492 * but since that doesn't exist yet, just open code it. */
@@ -6239,17 +6498,16 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6239 return -EBUSY; 6498 return -EBUSY;
6240 6499
6241 /* A reprieve! - use start_queue because it doesn't call schedule */ 6500 /* A reprieve! - use start_queue because it doesn't call schedule */
6242 netif_start_subqueue(netdev, tx_ring->queue_index); 6501 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
6243 ++tx_ring->restart_queue; 6502 ++tx_ring->tx_stats.restart_queue;
6244 return 0; 6503 return 0;
6245} 6504}
6246 6505
6247static int ixgbe_maybe_stop_tx(struct net_device *netdev, 6506static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
6248 struct ixgbe_ring *tx_ring, int size)
6249{ 6507{
6250 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 6508 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
6251 return 0; 6509 return 0;
6252 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); 6510 return __ixgbe_maybe_stop_tx(tx_ring, size);
6253} 6511}
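__ixgbe_maybe_stop_tx() keeps the usual stop-then-recheck dance: stop the subqueue, issue a full memory barrier so the stop is visible before the free-descriptor count is re-read, and restart immediately if the clean-up path freed enough descriptors in the meantime (counting that restart in restart_queue). A minimal sketch of the pattern against a generic ring; the demo_* fields stand in for the driver's own:

    #include <linux/netdevice.h>

    struct demo_ring {
            struct net_device *netdev;
            u16 queue_index;
            u16 count, next_to_use, next_to_clean;
    };

    static inline u16 demo_unused_descs(const struct demo_ring *r)
    {
            return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
                   r->next_to_clean - r->next_to_use - 1;
    }

    static int demo_maybe_stop_tx(struct demo_ring *r, u16 needed)
    {
            if (likely(demo_unused_descs(r) >= needed))
                    return 0;

            netif_stop_subqueue(r->netdev, r->queue_index);
            smp_mb();       /* make the stop visible before re-reading the count */
            if (likely(demo_unused_descs(r) < needed))
                    return -EBUSY;  /* stay stopped; Tx clean-up will wake the queue */

            /* A reprieve: clean-up freed descriptors while we were stopping. */
            netif_start_subqueue(r->netdev, r->queue_index);
            return 0;
    }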
6254 6512
6255static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) 6513static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
@@ -6294,10 +6552,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6294 return skb_tx_hash(dev, skb); 6552 return skb_tx_hash(dev, skb);
6295} 6553}
6296 6554
6297netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev, 6555netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6298 struct ixgbe_adapter *adapter, 6556 struct ixgbe_adapter *adapter,
6299 struct ixgbe_ring *tx_ring) 6557 struct ixgbe_ring *tx_ring)
6300{ 6558{
6559 struct net_device *netdev = tx_ring->netdev;
6301 struct netdev_queue *txq; 6560 struct netdev_queue *txq;
6302 unsigned int first; 6561 unsigned int first;
6303 unsigned int tx_flags = 0; 6562 unsigned int tx_flags = 0;
@@ -6355,8 +6614,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6355 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 6614 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6356 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 6615 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6357 6616
6358 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { 6617 if (ixgbe_maybe_stop_tx(tx_ring, count)) {
6359 adapter->tx_busy++; 6618 tx_ring->tx_stats.tx_busy++;
6360 return NETDEV_TX_BUSY; 6619 return NETDEV_TX_BUSY;
6361 } 6620 }
6362 6621
@@ -6390,14 +6649,14 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6390 tx_flags |= IXGBE_TX_FLAGS_CSUM; 6649 tx_flags |= IXGBE_TX_FLAGS_CSUM;
6391 } 6650 }
6392 6651
6393 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); 6652 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
6394 if (count) { 6653 if (count) {
6395 /* add the ATR filter if ATR is on */ 6654 /* add the ATR filter if ATR is on */
6396 if (tx_ring->atr_sample_rate) { 6655 if (tx_ring->atr_sample_rate) {
6397 ++tx_ring->atr_count; 6656 ++tx_ring->atr_count;
6398 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && 6657 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
6399 test_bit(__IXGBE_FDIR_INIT_DONE, 6658 test_bit(__IXGBE_TX_FDIR_INIT_DONE,
6400 &tx_ring->reinit_state)) { 6659 &tx_ring->state)) {
6401 ixgbe_atr(adapter, skb, tx_ring->queue_index, 6660 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6402 tx_flags, protocol); 6661 tx_flags, protocol);
6403 tx_ring->atr_count = 0; 6662 tx_ring->atr_count = 0;
@@ -6406,9 +6665,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6406 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); 6665 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
6407 txq->tx_bytes += skb->len; 6666 txq->tx_bytes += skb->len;
6408 txq->tx_packets++; 6667 txq->tx_packets++;
6409 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, 6668 ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
6410 hdr_len); 6669 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
6411 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
6412 6670
6413 } else { 6671 } else {
6414 dev_kfree_skb_any(skb); 6672 dev_kfree_skb_any(skb);
@@ -6425,7 +6683,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netd
6425 struct ixgbe_ring *tx_ring; 6683 struct ixgbe_ring *tx_ring;
6426 6684
6427 tx_ring = adapter->tx_ring[skb->queue_mapping]; 6685 tx_ring = adapter->tx_ring[skb->queue_mapping];
6428 return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring); 6686 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
6429} 6687}
6430 6688
6431/** 6689/**
@@ -6566,20 +6824,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6566 6824
6567 /* accurate rx/tx bytes/packets stats */ 6825 /* accurate rx/tx bytes/packets stats */
6568 dev_txq_stats_fold(netdev, stats); 6826 dev_txq_stats_fold(netdev, stats);
6827 rcu_read_lock();
6569 for (i = 0; i < adapter->num_rx_queues; i++) { 6828 for (i = 0; i < adapter->num_rx_queues; i++) {
6570 struct ixgbe_ring *ring = adapter->rx_ring[i]; 6829 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
6571 u64 bytes, packets; 6830 u64 bytes, packets;
6572 unsigned int start; 6831 unsigned int start;
6573 6832
6574 do { 6833 if (ring) {
6575 start = u64_stats_fetch_begin_bh(&ring->syncp); 6834 do {
6576 packets = ring->stats.packets; 6835 start = u64_stats_fetch_begin_bh(&ring->syncp);
6577 bytes = ring->stats.bytes; 6836 packets = ring->stats.packets;
6578 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 6837 bytes = ring->stats.bytes;
6579 stats->rx_packets += packets; 6838 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6580 stats->rx_bytes += bytes; 6839 stats->rx_packets += packets;
6840 stats->rx_bytes += bytes;
6841 }
6581 } 6842 }
6582 6843 rcu_read_unlock();
6583 /* following stats updated by ixgbe_watchdog_task() */ 6844 /* following stats updated by ixgbe_watchdog_task() */
6584 stats->multicast = netdev->stats.multicast; 6845 stats->multicast = netdev->stats.multicast;
6585 stats->rx_errors = netdev->stats.rx_errors; 6846 stats->rx_errors = netdev->stats.rx_errors;
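ixgbe_get_stats64() now reads the rings under rcu_read_lock() and re-checks each pointer with ACCESS_ONCE(), because rings freed through call_rcu() (see the clear_interrupt_scheme hunk above) can disappear while this runs without RTNL; each ring's bytes/packets pair is sampled inside its u64_stats_sync sequence so 32-bit hosts get a consistent snapshot. A hedged sketch of one such reader with an illustrative demo_ring layout; the caller is assumed to hold rcu_read_lock():

    #include <linux/types.h>
    #include <linux/u64_stats_sync.h>

    struct demo_ring {
            struct u64_stats_sync syncp;
            u64 packets, bytes;
    };

    /* Sample one ring's counters consistently; caller holds rcu_read_lock(). */
    static void demo_ring_snapshot(const struct demo_ring *ring,
                                   u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin_bh(&ring->syncp);
                    *packets = ring->packets;
                    *bytes = ring->bytes;
            } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
    }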
@@ -6694,11 +6955,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6694 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 6955 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
6695 static int cards_found; 6956 static int cards_found;
6696 int i, err, pci_using_dac; 6957 int i, err, pci_using_dac;
6958 u8 part_str[IXGBE_PBANUM_LENGTH];
6697 unsigned int indices = num_possible_cpus(); 6959 unsigned int indices = num_possible_cpus();
6698#ifdef IXGBE_FCOE 6960#ifdef IXGBE_FCOE
6699 u16 device_caps; 6961 u16 device_caps;
6700#endif 6962#endif
6701 u32 part_num, eec; 6963 u32 eec;
6702 6964
6703 /* Catch broken hardware that put the wrong VF device ID in 6965 /* Catch broken hardware that put the wrong VF device ID in
6704 * the PCIe SR-IOV capability. 6966 * the PCIe SR-IOV capability.
@@ -6761,8 +7023,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6761 7023
6762 SET_NETDEV_DEV(netdev, &pdev->dev); 7024 SET_NETDEV_DEV(netdev, &pdev->dev);
6763 7025
6764 pci_set_drvdata(pdev, netdev);
6765 adapter = netdev_priv(netdev); 7026 adapter = netdev_priv(netdev);
7027 pci_set_drvdata(pdev, adapter);
6766 7028
6767 adapter->netdev = netdev; 7029 adapter->netdev = netdev;
6768 adapter->pdev = pdev; 7030 adapter->pdev = pdev;
@@ -6835,8 +7097,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6835 goto err_sw_init; 7097 goto err_sw_init;
6836 7098
6837 /* Make it possible the adapter to be woken up via WOL */ 7099 /* Make it possible the adapter to be woken up via WOL */
6838 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 7100 switch (adapter->hw.mac.type) {
7101 case ixgbe_mac_82599EB:
7102 case ixgbe_mac_X540:
6839 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 7103 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
7104 break;
7105 default:
7106 break;
7107 }
6840 7108
6841 /* 7109 /*
6842 * If there is a fan on this device and it has failed log the 7110 * If there is a fan on this device and it has failed log the
@@ -6944,8 +7212,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6944 goto err_eeprom; 7212 goto err_eeprom;
6945 } 7213 }
6946 7214
6947 /* power down the optics */ 7215 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
6948 if (hw->phy.multispeed_fiber) 7216 if (hw->mac.ops.disable_tx_laser &&
7217 ((hw->phy.multispeed_fiber) ||
7218 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
7219 (hw->mac.type == ixgbe_mac_82599EB))))
6949 hw->mac.ops.disable_tx_laser(hw); 7220 hw->mac.ops.disable_tx_laser(hw);
6950 7221
6951 init_timer(&adapter->watchdog_timer); 7222 init_timer(&adapter->watchdog_timer);
@@ -6960,6 +7231,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6960 goto err_sw_init; 7231 goto err_sw_init;
6961 7232
6962 switch (pdev->device) { 7233 switch (pdev->device) {
7234 case IXGBE_DEV_ID_82599_SFP:
7235 /* Only this subdevice supports WOL */
7236 if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
7237 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
7238 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7239 break;
7240 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
7241 /* All except this subdevice support WOL */
7242 if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
7243 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
7244 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7245 break;
6963 case IXGBE_DEV_ID_82599_KX4: 7246 case IXGBE_DEV_ID_82599_KX4:
6964 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 7247 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
6965 IXGBE_WUFC_MC | IXGBE_WUFC_BC); 7248 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
@@ -6983,16 +7266,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6983 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" : 7266 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
6984 "Unknown"), 7267 "Unknown"),
6985 netdev->dev_addr); 7268 netdev->dev_addr);
6986 ixgbe_read_pba_num_generic(hw, &part_num); 7269
7270 err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
7271 if (err)
7272 strcpy(part_str, "Unknown");
6987 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 7273 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
6988 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " 7274 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
6989 "PBA No: %06x-%03x\n",
6990 hw->mac.type, hw->phy.type, hw->phy.sfp_type, 7275 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
6991 (part_num >> 8), (part_num & 0xff)); 7276 part_str);
6992 else 7277 else
6993 e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 7278 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
6994 hw->mac.type, hw->phy.type, 7279 hw->mac.type, hw->phy.type, part_str);
6995 (part_num >> 8), (part_num & 0xff));
6996 7280
6997 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { 7281 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
6998 e_dev_warn("PCI-Express bandwidth available for this card is " 7282 e_dev_warn("PCI-Express bandwidth available for this card is "
@@ -7085,8 +7369,8 @@ err_dma:
7085 **/ 7369 **/
7086static void __devexit ixgbe_remove(struct pci_dev *pdev) 7370static void __devexit ixgbe_remove(struct pci_dev *pdev)
7087{ 7371{
7088 struct net_device *netdev = pci_get_drvdata(pdev); 7372 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7089 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7373 struct net_device *netdev = adapter->netdev;
7090 7374
7091 set_bit(__IXGBE_DOWN, &adapter->state); 7375 set_bit(__IXGBE_DOWN, &adapter->state);
7092 /* clear the module not found bit to make sure the worker won't 7376 /* clear the module not found bit to make sure the worker won't
@@ -7156,8 +7440,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7156static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, 7440static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
7157 pci_channel_state_t state) 7441 pci_channel_state_t state)
7158{ 7442{
7159 struct net_device *netdev = pci_get_drvdata(pdev); 7443 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7160 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7444 struct net_device *netdev = adapter->netdev;
7161 7445
7162 netif_device_detach(netdev); 7446 netif_device_detach(netdev);
7163 7447
@@ -7180,8 +7464,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
7180 */ 7464 */
7181static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) 7465static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
7182{ 7466{
7183 struct net_device *netdev = pci_get_drvdata(pdev); 7467 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7184 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7185 pci_ers_result_t result; 7468 pci_ers_result_t result;
7186 int err; 7469 int err;
7187 7470
@@ -7219,8 +7502,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
7219 */ 7502 */
7220static void ixgbe_io_resume(struct pci_dev *pdev) 7503static void ixgbe_io_resume(struct pci_dev *pdev)
7221{ 7504{
7222 struct net_device *netdev = pci_get_drvdata(pdev); 7505 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7223 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7506 struct net_device *netdev = adapter->netdev;
7224 7507
7225 if (netif_running(netdev)) { 7508 if (netif_running(netdev)) {
7226 if (ixgbe_up(adapter)) { 7509 if (ixgbe_up(adapter)) {
@@ -7285,6 +7568,7 @@ static void __exit ixgbe_exit_module(void)
7285 dca_unregister_notify(&dca_notifier); 7568 dca_unregister_notify(&dca_notifier);
7286#endif 7569#endif
7287 pci_unregister_driver(&ixgbe_driver); 7570 pci_unregister_driver(&ixgbe_driver);
7571 rcu_barrier(); /* Wait for completion of call_rcu()'s */
7288} 7572}
7289 7573
7290#ifdef CONFIG_IXGBE_DCA 7574#ifdef CONFIG_IXGBE_DCA
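The rcu_barrier() added to ixgbe_exit_module() pairs with the call_rcu() introduced for the Rx rings: a module that queues RCU callbacks has to wait for all of them to run before its text is unloaded, otherwise the deferred kfree() would execute from freed module memory. A minimal sketch of that pairing in a stand-alone module skeleton; the names are illustrative:

    #include <linux/module.h>
    #include <linux/rcupdate.h>

    static int __init demo_init(void)
    {
            return 0;
    }

    static void __exit demo_exit(void)
    {
            /* after unregistering everything that could queue more callbacks,
             * wait for the call_rcu() callbacks this module already queued */
            rcu_barrier();
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");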
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index 471f0f2cdb9..027c628c3aa 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -319,8 +319,14 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
319 u32 vflre = 0; 319 u32 vflre = 0;
320 s32 ret_val = IXGBE_ERR_MBX; 320 s32 ret_val = IXGBE_ERR_MBX;
321 321
322 if (hw->mac.type == ixgbe_mac_82599EB) 322 switch (hw->mac.type) {
323 case ixgbe_mac_82599EB:
324 case ixgbe_mac_X540:
323 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); 325 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
326 break;
327 default:
328 break;
329 }
324 330
325 if (vflre & (1 << vf_shift)) { 331 if (vflre & (1 << vf_shift)) {
326 ret_val = 0; 332 ret_val = 0;
@@ -439,22 +445,26 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
439{ 445{
440 struct ixgbe_mbx_info *mbx = &hw->mbx; 446 struct ixgbe_mbx_info *mbx = &hw->mbx;
441 447
442 if (hw->mac.type != ixgbe_mac_82599EB) 448 switch (hw->mac.type) {
443 return; 449 case ixgbe_mac_82599EB:
444 450 case ixgbe_mac_X540:
445 mbx->timeout = 0; 451 mbx->timeout = 0;
446 mbx->usec_delay = 0; 452 mbx->usec_delay = 0;
447 453
448 mbx->size = IXGBE_VFMAILBOX_SIZE; 454 mbx->size = IXGBE_VFMAILBOX_SIZE;
449 455
450 mbx->stats.msgs_tx = 0; 456 mbx->stats.msgs_tx = 0;
451 mbx->stats.msgs_rx = 0; 457 mbx->stats.msgs_rx = 0;
452 mbx->stats.reqs = 0; 458 mbx->stats.reqs = 0;
453 mbx->stats.acks = 0; 459 mbx->stats.acks = 0;
454 mbx->stats.rsts = 0; 460 mbx->stats.rsts = 0;
461 break;
462 default:
463 break;
464 }
455} 465}
456 466
457struct ixgbe_mbx_operations mbx_ops_82599 = { 467struct ixgbe_mbx_operations mbx_ops_generic = {
458 .read = ixgbe_read_mbx_pf, 468 .read = ixgbe_read_mbx_pf,
459 .write = ixgbe_write_mbx_pf, 469 .write = ixgbe_write_mbx_pf,
460 .read_posted = ixgbe_read_posted_mbx, 470 .read_posted = ixgbe_read_posted_mbx,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 7e0d08ff5b5..3df9b159021 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -88,6 +88,6 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); 88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
89void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); 89void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
90 90
91extern struct ixgbe_mbx_operations mbx_ops_82599; 91extern struct ixgbe_mbx_operations mbx_ops_generic;
92 92
93#endif /* _IXGBE_MBX_H_ */ 93#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 6c0d42e33f2..c445fbce56e 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -115,6 +115,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
115 case TN1010_PHY_ID: 115 case TN1010_PHY_ID:
116 phy_type = ixgbe_phy_tn; 116 phy_type = ixgbe_phy_tn;
117 break; 117 break;
118 case AQ1202_PHY_ID:
119 phy_type = ixgbe_phy_aq;
120 break;
118 case QT2022_PHY_ID: 121 case QT2022_PHY_ID:
119 phy_type = ixgbe_phy_qt; 122 phy_type = ixgbe_phy_qt;
120 break; 123 break;
@@ -425,6 +428,39 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
425} 428}
426 429
427/** 430/**
431 * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
432 * @hw: pointer to hardware structure
433 * @speed: pointer to link speed
434 * @autoneg: boolean auto-negotiation value
435 *
436 * Determines the link capabilities by reading the AUTOC register.
437 */
438s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
439 ixgbe_link_speed *speed,
440 bool *autoneg)
441{
442 s32 status = IXGBE_ERR_LINK_SETUP;
443 u16 speed_ability;
444
445 *speed = 0;
446 *autoneg = true;
447
448 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
449 &speed_ability);
450
451 if (status == 0) {
452 if (speed_ability & MDIO_SPEED_10G)
453 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
454 if (speed_ability & MDIO_PMA_SPEED_1000)
455 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
456 if (speed_ability & MDIO_PMA_SPEED_100)
457 *speed |= IXGBE_LINK_SPEED_100_FULL;
458 }
459
460 return status;
461}
462
463/**
428 * ixgbe_reset_phy_nl - Performs a PHY reset 464 * ixgbe_reset_phy_nl - Performs a PHY reset
429 * @hw: pointer to hardware structure 465 * @hw: pointer to hardware structure
430 **/ 466 **/
@@ -1378,6 +1414,22 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
1378} 1414}
1379 1415
1380/** 1416/**
1417 * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
1418 * @hw: pointer to hardware structure
1419 * @firmware_version: pointer to the PHY Firmware Version
1420**/
1421s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
1422 u16 *firmware_version)
1423{
1424 s32 status = 0;
1425
1426 status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1,
1427 firmware_version);
1428
1429 return status;
1430}
1431
1432/**
 1381 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 1433 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
1382 * @hw: pointer to hardware structure 1434 * @hw: pointer to hardware structure
1383 * 1435 *
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index fb3898f12fc..e2c6b7eac64 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -96,6 +96,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
96 ixgbe_link_speed speed, 96 ixgbe_link_speed speed,
97 bool autoneg, 97 bool autoneg,
98 bool autoneg_wait_to_complete); 98 bool autoneg_wait_to_complete);
99s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
100 ixgbe_link_speed *speed,
101 bool *autoneg);
99 102
100/* PHY specific */ 103/* PHY specific */
101s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, 104s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
@@ -103,6 +106,8 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
103 bool *link_up); 106 bool *link_up);
104s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, 107s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
105 u16 *firmware_version); 108 u16 *firmware_version);
109s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
110 u16 *firmware_version);
106 111
107s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 112s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
108s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 113s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 5428153af8f..6e3e94b5a5f 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -68,7 +68,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
68 * addresses 68 * addresses
69 */ 69 */
70 for (i = 0; i < entries; i++) { 70 for (i = 0; i < entries; i++) {
71 vfinfo->vf_mc_hashes[i] = hash_list[i];; 71 vfinfo->vf_mc_hashes[i] = hash_list[i];
72 } 72 }
73 73
74 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { 74 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
@@ -178,8 +178,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
178int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) 178int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
179{ 179{
180 unsigned char vf_mac_addr[6]; 180 unsigned char vf_mac_addr[6];
181 struct net_device *netdev = pci_get_drvdata(pdev); 181 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
182 struct ixgbe_adapter *adapter = netdev_priv(netdev);
183 unsigned int vfn = (event_mask & 0x3f); 182 unsigned int vfn = (event_mask & 0x3f);
184 183
185 bool enable = ((event_mask & 0x10000000U) != 0); 184 bool enable = ((event_mask & 0x10000000U) != 0);
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index d3cc6ce7c97..0f80893edab 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -54,9 +54,14 @@
54#define IXGBE_DEV_ID_82599_T3_LOM 0x151C 54#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
55#define IXGBE_DEV_ID_82599_CX4 0x10F9 55#define IXGBE_DEV_ID_82599_CX4 0x10F9
56#define IXGBE_DEV_ID_82599_SFP 0x10FB 56#define IXGBE_DEV_ID_82599_SFP 0x10FB
57#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
58#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
59#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
57#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 60#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
58#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC 61#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
59#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 62#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
63#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
64#define IXGBE_DEV_ID_X540T 0x1528
60 65
61/* General Registers */ 66/* General Registers */
62#define IXGBE_CTRL 0x00000 67#define IXGBE_CTRL 0x00000
@@ -994,8 +999,10 @@
994/* PHY IDs*/ 999/* PHY IDs*/
995#define TN1010_PHY_ID 0x00A19410 1000#define TN1010_PHY_ID 0x00A19410
996#define TNX_FW_REV 0xB 1001#define TNX_FW_REV 0xB
1002#define AQ1202_PHY_ID 0x03A1B440
997#define QT2022_PHY_ID 0x0043A400 1003#define QT2022_PHY_ID 0x0043A400
998#define ATH_PHY_ID 0x03429050 1004#define ATH_PHY_ID 0x03429050
1005#define AQ_FW_REV 0x20
999 1006
1000/* PHY Types */ 1007/* PHY Types */
1001#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 1008#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
@@ -1463,6 +1470,8 @@
1463#define IXGBE_ANLP1_PAUSE 0x0C00 1470#define IXGBE_ANLP1_PAUSE 0x0C00
1464#define IXGBE_ANLP1_SYM_PAUSE 0x0400 1471#define IXGBE_ANLP1_SYM_PAUSE 0x0400
1465#define IXGBE_ANLP1_ASM_PAUSE 0x0800 1472#define IXGBE_ANLP1_ASM_PAUSE 0x0800
1473#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
1474
1466 1475
1467/* SW Semaphore Register bitmasks */ 1476/* SW Semaphore Register bitmasks */
1468#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1477#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
@@ -1491,6 +1500,7 @@
1491#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ 1500#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
1492#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ 1501#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
1493#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ 1502#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
1503#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
1494#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ 1504#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
1495/* EEPROM Addressing bits based on type (0-small, 1-large) */ 1505/* EEPROM Addressing bits based on type (0-small, 1-large) */
1496#define IXGBE_EEC_ADDR_SIZE 0x00000400 1506#define IXGBE_EEC_ADDR_SIZE 0x00000400
@@ -1500,12 +1510,18 @@
1500#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 1510#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
1501#define IXGBE_EEPROM_OPCODE_BITS 8 1511#define IXGBE_EEPROM_OPCODE_BITS 8
1502 1512
1513/* Part Number String Length */
1514#define IXGBE_PBANUM_LENGTH 11
1515
1503/* Checksum and EEPROM pointers */ 1516/* Checksum and EEPROM pointers */
1517#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
1504#define IXGBE_EEPROM_CHECKSUM 0x3F 1518#define IXGBE_EEPROM_CHECKSUM 0x3F
1505#define IXGBE_EEPROM_SUM 0xBABA 1519#define IXGBE_EEPROM_SUM 0xBABA
1506#define IXGBE_PCIE_ANALOG_PTR 0x03 1520#define IXGBE_PCIE_ANALOG_PTR 0x03
1507#define IXGBE_ATLAS0_CONFIG_PTR 0x04 1521#define IXGBE_ATLAS0_CONFIG_PTR 0x04
1522#define IXGBE_PHY_PTR 0x04
1508#define IXGBE_ATLAS1_CONFIG_PTR 0x05 1523#define IXGBE_ATLAS1_CONFIG_PTR 0x05
1524#define IXGBE_OPTION_ROM_PTR 0x05
1509#define IXGBE_PCIE_GENERAL_PTR 0x06 1525#define IXGBE_PCIE_GENERAL_PTR 0x06
1510#define IXGBE_PCIE_CONFIG0_PTR 0x07 1526#define IXGBE_PCIE_CONFIG0_PTR 0x07
1511#define IXGBE_PCIE_CONFIG1_PTR 0x08 1527#define IXGBE_PCIE_CONFIG1_PTR 0x08
@@ -2113,6 +2129,14 @@ typedef u32 ixgbe_physical_layer;
2113#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 2129#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
2114#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 2130#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
2115 2131
2132/* Flow Control Macros */
2133#define PAUSE_RTT 8
2134#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024)
2135
2136#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
2137 PAUSE_MTU(MTU))
2138#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
2139
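The flow control macros above work in 1 KB units: PAUSE_MTU() rounds the frame size up to whole kilobytes and PAUSE_RTT is a fixed round-trip allowance. A stand-alone check of the arithmetic, with the macro bodies copied from the hunk above (parentheses added around the MTU argument purely for macro hygiene), might look like this:

#include <stdio.h>

#define PAUSE_RTT 8
#define PAUSE_MTU(MTU) (((MTU) + 1024 - 1) / 1024)

#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 + \
			    PAUSE_MTU(MTU))
#define FC_LOW_WATER(MTU)  (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))

int main(void)
{
	int mtu;

	for (mtu = 1500; mtu <= 9000; mtu += 7500)
		printf("MTU %4d: high water %2d, low water %2d\n",
		       mtu, FC_HIGH_WATER(mtu), FC_LOW_WATER(mtu));
	return 0;
}

For a 1500-byte MTU this prints a high water mark of 17 and a low water mark of 24; for 9000 bytes, 34 and 52.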
2116/* Software ATR hash keys */ 2140/* Software ATR hash keys */
2117#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D 2141#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
2118#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 2142#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
@@ -2164,6 +2188,7 @@ struct ixgbe_atr_input_masks {
2164enum ixgbe_eeprom_type { 2188enum ixgbe_eeprom_type {
2165 ixgbe_eeprom_uninitialized = 0, 2189 ixgbe_eeprom_uninitialized = 0,
2166 ixgbe_eeprom_spi, 2190 ixgbe_eeprom_spi,
2191 ixgbe_flash,
2167 ixgbe_eeprom_none /* No NVM support */ 2192 ixgbe_eeprom_none /* No NVM support */
2168}; 2193};
2169 2194
@@ -2171,12 +2196,14 @@ enum ixgbe_mac_type {
2171 ixgbe_mac_unknown = 0, 2196 ixgbe_mac_unknown = 0,
2172 ixgbe_mac_82598EB, 2197 ixgbe_mac_82598EB,
2173 ixgbe_mac_82599EB, 2198 ixgbe_mac_82599EB,
2199 ixgbe_mac_X540,
2174 ixgbe_num_macs 2200 ixgbe_num_macs
2175}; 2201};
2176 2202
2177enum ixgbe_phy_type { 2203enum ixgbe_phy_type {
2178 ixgbe_phy_unknown = 0, 2204 ixgbe_phy_unknown = 0,
2179 ixgbe_phy_tn, 2205 ixgbe_phy_tn,
2206 ixgbe_phy_aq,
2180 ixgbe_phy_cu_unknown, 2207 ixgbe_phy_cu_unknown,
2181 ixgbe_phy_qt, 2208 ixgbe_phy_qt,
2182 ixgbe_phy_xaui, 2209 ixgbe_phy_xaui,
@@ -2405,6 +2432,7 @@ struct ixgbe_eeprom_operations {
2405 s32 (*write)(struct ixgbe_hw *, u16, u16); 2432 s32 (*write)(struct ixgbe_hw *, u16, u16);
2406 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); 2433 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
2407 s32 (*update_checksum)(struct ixgbe_hw *); 2434 s32 (*update_checksum)(struct ixgbe_hw *);
2435 u16 (*calc_checksum)(struct ixgbe_hw *);
2408}; 2436};
2409 2437
2410struct ixgbe_mac_operations { 2438struct ixgbe_mac_operations {
@@ -2574,6 +2602,7 @@ struct ixgbe_hw {
2574 u16 subsystem_vendor_id; 2602 u16 subsystem_vendor_id;
2575 u8 revision_id; 2603 u8 revision_id;
2576 bool adapter_stopped; 2604 bool adapter_stopped;
2605 bool force_full_reset;
2577}; 2606};
2578 2607
2579struct ixgbe_info { 2608struct ixgbe_info {
@@ -2614,6 +2643,9 @@ struct ixgbe_info {
2614#define IXGBE_ERR_NO_SPACE -25 2643#define IXGBE_ERR_NO_SPACE -25
2615#define IXGBE_ERR_OVERTEMP -26 2644#define IXGBE_ERR_OVERTEMP -26
2616#define IXGBE_ERR_RAR_INDEX -27 2645#define IXGBE_ERR_RAR_INDEX -27
2646#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
2647#define IXGBE_ERR_PBA_SECTION -31
2648#define IXGBE_ERR_INVALID_ARGUMENT -32
2617#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2649#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
2618 2650
2619#endif /* _IXGBE_TYPE_H_ */ 2651#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
new file mode 100644
index 00000000000..9649fa727e3
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -0,0 +1,722 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30#include <linux/sched.h>
31
32#include "ixgbe.h"
33#include "ixgbe_phy.h"
34//#include "ixgbe_mbx.h"
35
36#define IXGBE_X540_MAX_TX_QUEUES 128
37#define IXGBE_X540_MAX_RX_QUEUES 128
38#define IXGBE_X540_RAR_ENTRIES 128
39#define IXGBE_X540_MC_TBL_SIZE 128
40#define IXGBE_X540_VFT_TBL_SIZE 128
41
42static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
43static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
44static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
45static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
46static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
47static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
48
49static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
50{
51 return ixgbe_media_type_copper;
52}
53
54static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
55{
56 struct ixgbe_mac_info *mac = &hw->mac;
57
58 /* Call PHY identify routine to get the phy type */
59 ixgbe_identify_phy_generic(hw);
60
61 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
62 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
63 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
64 mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
65 mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
66 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
67
68 return 0;
69}
70
71/**
72 * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
73 * @hw: pointer to hardware structure
74 * @speed: new link speed
75 * @autoneg: true if autonegotiation enabled
76 * @autoneg_wait_to_complete: true when waiting for completion is needed
77 **/
78static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
79 ixgbe_link_speed speed, bool autoneg,
80 bool autoneg_wait_to_complete)
81{
82 return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
83 autoneg_wait_to_complete);
84}
85
86/**
87 * ixgbe_reset_hw_X540 - Perform hardware reset
88 * @hw: pointer to hardware structure
89 *
90 * Resets the hardware by resetting the transmit and receive units, masks
91 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
92 * reset.
93 **/
94static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
95{
96 ixgbe_link_speed link_speed;
97 s32 status = 0;
98 u32 ctrl;
99 u32 ctrl_ext;
100 u32 reset_bit;
101 u32 i;
102 u32 autoc;
103 u32 autoc2;
104 bool link_up = false;
105
106 /* Call adapter stop to disable tx/rx and clear interrupts */
107 hw->mac.ops.stop_adapter(hw);
108
109 /*
110 * Prevent the PCI-E bus from hanging by disabling PCI-E master
111 * access and verify no pending requests before reset
112 */
113 status = ixgbe_disable_pcie_master(hw);
114 if (status != 0) {
115 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
116 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
117 }
118
119 /*
120 * Issue global reset to the MAC. Needs to be SW reset if link is up.
121 * If link reset is used when link is up, it might reset the PHY when
122 * mng is using it. If link is down or the flag to force full link
123 * reset is set, then perform link reset.
124 */
125 if (hw->force_full_reset) {
126 reset_bit = IXGBE_CTRL_LNK_RST;
127 } else {
128 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
129 if (!link_up)
130 reset_bit = IXGBE_CTRL_LNK_RST;
131 else
132 reset_bit = IXGBE_CTRL_RST;
133 }
134
135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
136 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
137 IXGBE_WRITE_FLUSH(hw);
138
139 /* Poll for reset bit to self-clear indicating reset is complete */
140 for (i = 0; i < 10; i++) {
141 udelay(1);
142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
143 		if (!(ctrl & reset_bit))
144 break;
145 }
146 	if (ctrl & reset_bit) {
147 status = IXGBE_ERR_RESET_FAILED;
148 hw_dbg(hw, "Reset polling failed to complete.\n");
149 }
150
151 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
152 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
153 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
154 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
155
156 msleep(50);
157
158 /* Set the Rx packet buffer size. */
159 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
160
161 /* Store the permanent mac address */
162 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
163
164 /*
165 * Store the original AUTOC/AUTOC2 values if they have not been
166 * stored off yet. Otherwise restore the stored original
167 * values since the reset operation sets back to defaults.
168 */
169 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
170 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
171 if (hw->mac.orig_link_settings_stored == false) {
172 hw->mac.orig_autoc = autoc;
173 hw->mac.orig_autoc2 = autoc2;
174 hw->mac.orig_link_settings_stored = true;
175 } else {
176 if (autoc != hw->mac.orig_autoc)
177 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
178 IXGBE_AUTOC_AN_RESTART));
179
180 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
181 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
182 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
183 autoc2 |= (hw->mac.orig_autoc2 &
184 IXGBE_AUTOC2_UPPER_MASK);
185 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
186 }
187 }
188
189 /*
190 * Store MAC address from RAR0, clear receive address registers, and
191 * clear the multicast table. Also reset num_rar_entries to 128,
192 * since we modify this value when programming the SAN MAC address.
193 */
194 hw->mac.num_rar_entries = 128;
195 hw->mac.ops.init_rx_addrs(hw);
196
197 /* Store the permanent mac address */
198 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
199
200 /* Store the permanent SAN mac address */
201 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
202
203 /* Add the SAN MAC address to the RAR only if it's a valid address */
204 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
205 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
206 hw->mac.san_addr, 0, IXGBE_RAH_AV);
207
208 /* Reserve the last RAR for the SAN MAC address */
209 hw->mac.num_rar_entries--;
210 }
211
212 /* Store the alternative WWNN/WWPN prefix */
213 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
214 &hw->mac.wwpn_prefix);
215
216 return status;
217}
218
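The reset path above picks a link reset unless the link is up and no full reset was forced, writes the self-clearing reset bit, flushes, and then polls a small, fixed number of times for the bit to drop. A minimal user-space model of that bounded-poll shape (the simulated register, the bit position and the three-read latency are all invented for the example; only the control flow mirrors the driver):

#include <stdint.h>
#include <stdio.h>

#define CTRL_RST (1u << 26)	/* illustrative bit position */

/* Simulated device register: clears the reset bit after a few reads. */
static uint32_t fake_ctrl;
static int reads_until_done = 3;

static uint32_t read_ctrl(void)
{
	if (reads_until_done > 0 && --reads_until_done == 0)
		fake_ctrl &= ~CTRL_RST;
	return fake_ctrl;
}

int main(void)
{
	uint32_t ctrl = CTRL_RST;
	int i;

	fake_ctrl = CTRL_RST;		/* "write" the self-clearing reset bit */

	for (i = 0; i < 10; i++) {	/* bounded poll, ten tries as in the driver */
		ctrl = read_ctrl();
		if (!(ctrl & CTRL_RST))
			break;
	}

	if (ctrl & CTRL_RST)
		printf("reset polling failed\n");
	else
		printf("reset complete after %d polls\n", i + 1);
	return 0;
}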
219/**
220 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
221 * @hw: pointer to hardware structure
222 *
223 * Determines physical layer capabilities of the current configuration.
224 **/
225static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
226{
227 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
228 u16 ext_ability = 0;
229
230 hw->phy.ops.identify(hw);
231
232 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
233 &ext_ability);
234 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
235 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
236 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
237 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
238 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
239 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
240
241 return physical_layer;
242}
243
244/**
245 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
246 * @hw: pointer to hardware structure
247 **/
248static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
249{
250 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
251 u32 eec;
252 u16 eeprom_size;
253
254 if (eeprom->type == ixgbe_eeprom_uninitialized) {
255 eeprom->semaphore_delay = 10;
256 eeprom->type = ixgbe_flash;
257
258 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
259 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
260 IXGBE_EEC_SIZE_SHIFT);
261 eeprom->word_size = 1 << (eeprom_size +
262 IXGBE_EEPROM_WORD_SIZE_SHIFT);
263
264 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
265 eeprom->type, eeprom->word_size);
266 }
267
268 return 0;
269}
270
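The word count above comes straight out of the EEC size field: word_size = 1 << (size_field + IXGBE_EEPROM_WORD_SIZE_SHIFT), with the shift of 6 defined earlier in this patch. A small sketch of that mapping, where the EEC_SIZE mask and shift are stand-ins for the IXGBE_EEC_SIZE definitions that are not part of this hunk:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_WORD_SIZE_SHIFT 6	/* matches IXGBE_EEPROM_WORD_SIZE_SHIFT */

/* Stand-ins for IXGBE_EEC_SIZE / IXGBE_EEC_SIZE_SHIFT, which are
 * not shown in this hunk. */
#define EEC_SIZE	0x00007800
#define EEC_SIZE_SHIFT	11

static unsigned int eeprom_words(uint32_t eec)
{
	uint16_t size_field = (uint16_t)((eec & EEC_SIZE) >> EEC_SIZE_SHIFT);

	return 1u << (size_field + EEPROM_WORD_SIZE_SHIFT);
}

int main(void)
{
	uint32_t size_field;

	for (size_field = 0; size_field <= 4; size_field++)
		printf("EEC size field %u -> %u words\n", size_field,
		       eeprom_words(size_field << EEC_SIZE_SHIFT));
	return 0;
}

So a size field of 0 corresponds to 64 words, and each increment doubles the reported EEPROM size.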
271/**
272 * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
273 * @hw: pointer to hardware structure
274 * @offset: offset of word in the EEPROM to read
275 * @data: word read from the EEPROM
276 **/
277static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
278{
279 s32 status;
280
281 	if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0)
282 status = ixgbe_read_eerd_generic(hw, offset, data);
283 else
284 status = IXGBE_ERR_SWFW_SYNC;
285
286 ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
287 return status;
288}
289
290/**
291 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
292 * @hw: pointer to hardware structure
293 * @offset: offset of word in the EEPROM to write
294 * @data: word to write to the EEPROM
295 *
296 * Write a 16 bit word to the EEPROM using the EEWR register.
297 **/
298static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
299{
300 u32 eewr;
301 s32 status;
302
303 hw->eeprom.ops.init_params(hw);
304
305 if (offset >= hw->eeprom.word_size) {
306 status = IXGBE_ERR_EEPROM;
307 goto out;
308 }
309
310 eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
311 (data << IXGBE_EEPROM_RW_REG_DATA) |
312 IXGBE_EEPROM_RW_REG_START;
313
314 	if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) {
315 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
316 if (status != 0) {
317 hw_dbg(hw, "Eeprom write EEWR timed out\n");
318 goto out;
319 }
320
321 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
322
323 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
324 if (status != 0) {
325 hw_dbg(hw, "Eeprom write EEWR timed out\n");
326 goto out;
327 }
328 } else {
329 status = IXGBE_ERR_SWFW_SYNC;
330 }
331
332out:
333 ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
334 return status;
335}
336
337/**
338 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
339 * @hw: pointer to hardware structure
340 **/
341static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
342{
343 u16 i;
344 u16 j;
345 u16 checksum = 0;
346 u16 length = 0;
347 u16 pointer = 0;
348 u16 word = 0;
349
350 /* Include 0x0-0x3F in the checksum */
351 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
352 if (hw->eeprom.ops.read(hw, i, &word) != 0) {
353 hw_dbg(hw, "EEPROM read failed\n");
354 break;
355 }
356 checksum += word;
357 }
358
359 /*
360 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
361 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
362 */
363 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
364 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
365 continue;
366
367 if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
368 hw_dbg(hw, "EEPROM read failed\n");
369 break;
370 }
371
372 /* Skip pointer section if the pointer is invalid. */
373 if (pointer == 0xFFFF || pointer == 0 ||
374 pointer >= hw->eeprom.word_size)
375 continue;
376
377 if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
378 hw_dbg(hw, "EEPROM read failed\n");
379 break;
380 }
381
382 /* Skip pointer section if length is invalid. */
383 if (length == 0xFFFF || length == 0 ||
384 (pointer + length) >= hw->eeprom.word_size)
385 continue;
386
387 for (j = pointer+1; j <= pointer+length; j++) {
388 if (hw->eeprom.ops.read(hw, j, &word) != 0) {
389 hw_dbg(hw, "EEPROM read failed\n");
390 break;
391 }
392 checksum += word;
393 }
394 }
395
396 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
397
398 return checksum;
399}
400
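The checksum scheme is: sum every word of the fixed 0x00-0x3E header, walk each valid section pointer (skipping the FW, PHY module and option ROM pointers) and add that section's words, then store IXGBE_EEPROM_SUM minus the running total so that re-summing the whole image, checksum word included, yields 0xBABA. A condensed user-space model over an in-memory word array shows the idea; it keeps only the header pass (the pointer walk in the driver just extends the same running sum) and the miniature image contents are invented:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_CHECKSUM	0x3F	/* word holding the stored checksum */
#define EEPROM_SUM	0xBABA	/* target value of the full sum */

static uint16_t image[128];	/* invented miniature EEPROM image */

static uint16_t calc_checksum(void)
{
	uint16_t checksum = 0;
	uint16_t i;

	/* Fixed header region, excluding the checksum word itself. */
	for (i = 0; i < EEPROM_CHECKSUM; i++)
		checksum += image[i];

	return (uint16_t)(EEPROM_SUM - checksum);
}

int main(void)
{
	uint16_t sum = 0;
	int i;

	/* Fill the header with arbitrary data, then store the checksum. */
	for (i = 0; i < EEPROM_CHECKSUM; i++)
		image[i] = (uint16_t)(i * 3 + 7);
	image[EEPROM_CHECKSUM] = calc_checksum();

	/* Verification: header plus checksum word must sum to 0xBABA. */
	for (i = 0; i <= EEPROM_CHECKSUM; i++)
		sum += image[i];
	printf("sum = 0x%04X (expect 0x%04X)\n", (unsigned)sum, EEPROM_SUM);
	return 0;
}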
401/**
402 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
403 * @hw: pointer to hardware structure
404 *
405 * After writing EEPROM to shadow RAM using EEWR register, software calculates
406 * checksum and updates the EEPROM and instructs the hardware to update
407 * the flash.
408 **/
409static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
410{
411 s32 status;
412
413 status = ixgbe_update_eeprom_checksum_generic(hw);
414
415 	if (status == 0)
416 status = ixgbe_update_flash_X540(hw);
417
418 return status;
419}
420
421/**
422 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
423 * @hw: pointer to hardware structure
424 *
425 * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
426 * EEPROM from shadow RAM to the flash device.
427 **/
428static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
429{
430 u32 flup;
431 s32 status = IXGBE_ERR_EEPROM;
432
433 status = ixgbe_poll_flash_update_done_X540(hw);
434 if (status == IXGBE_ERR_EEPROM) {
435 hw_dbg(hw, "Flash update time out\n");
436 goto out;
437 }
438
439 flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
440 IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
441
442 status = ixgbe_poll_flash_update_done_X540(hw);
443 	if (status == 0)
444 hw_dbg(hw, "Flash update complete\n");
445 else
446 hw_dbg(hw, "Flash update time out\n");
447
448 if (hw->revision_id == 0) {
449 flup = IXGBE_READ_REG(hw, IXGBE_EEC);
450
451 if (flup & IXGBE_EEC_SEC1VAL) {
452 flup |= IXGBE_EEC_FLUP;
453 IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
454 }
455
456 status = ixgbe_poll_flash_update_done_X540(hw);
457 		if (status == 0)
458 hw_dbg(hw, "Flash update complete\n");
459 else
460 hw_dbg(hw, "Flash update time out\n");
461
462 }
463out:
464 return status;
465}
466
467/**
468 * ixgbe_poll_flash_update_done_X540 - Poll flash update status
469 * @hw: pointer to hardware structure
470 *
471 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
472 * flash update is done.
473 **/
474static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
475{
476 u32 i;
477 u32 reg;
478 s32 status = IXGBE_ERR_EEPROM;
479
480 for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
481 reg = IXGBE_READ_REG(hw, IXGBE_EEC);
482 if (reg & IXGBE_EEC_FLUDONE) {
483 status = 0;
484 break;
485 }
486 udelay(5);
487 }
488 return status;
489}
490
491/**
492 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
493 * @hw: pointer to hardware structure
494 * @mask: Mask to specify which semaphore to acquire
495 *
496 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
497 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
498 **/
499static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
500{
501 u32 swfw_sync;
502 u32 swmask = mask;
503 u32 fwmask = mask << 5;
504 u32 hwmask = 0;
505 u32 timeout = 200;
506 u32 i;
507
508 if (swmask == IXGBE_GSSR_EEP_SM)
509 hwmask = IXGBE_GSSR_FLASH_SM;
510
511 for (i = 0; i < timeout; i++) {
512 /*
513 * SW NVM semaphore bit is used for access to all
514 * SW_FW_SYNC bits (not just NVM)
515 */
516 if (ixgbe_get_swfw_sync_semaphore(hw))
517 return IXGBE_ERR_SWFW_SYNC;
518
519 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
520 if (!(swfw_sync & (fwmask | swmask | hwmask))) {
521 swfw_sync |= swmask;
522 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
523 ixgbe_release_swfw_sync_semaphore(hw);
524 break;
525 } else {
526 /*
527 * Firmware currently using resource (fwmask),
528 * hardware currently using resource (hwmask),
529 * or other software thread currently using
530 * resource (swmask)
531 */
532 ixgbe_release_swfw_sync_semaphore(hw);
533 msleep(5);
534 }
535 }
536
537 /*
538 * If the resource is not released by the FW/HW the SW can assume that
539 * the FW/HW malfunctions. In that case the SW should set the
540 * SW bit(s) of the requested resource(s) while ignoring the
541 * corresponding FW/HW bits in the SW_FW_SYNC register.
542 */
543 if (i >= timeout) {
544 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
545 if (swfw_sync & (fwmask | hwmask)) {
546 if (ixgbe_get_swfw_sync_semaphore(hw))
547 return IXGBE_ERR_SWFW_SYNC;
548
549 swfw_sync |= swmask;
550 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
551 ixgbe_release_swfw_sync_semaphore(hw);
552 }
553 }
554
555 msleep(5);
556 return 0;
557}
558
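The arbitration above is two-level: the SMBI/REGSMP semaphore protects the SW_FW_SYNC word itself, and inside that word separate SW, FW and HW bit groups record who owns each resource, with the firmware bits sitting five positions above the software bits. Stripping out the register access, the hardware/flash mask and the SMBI/REGSMP locking, the core claim-and-release logic can be sketched like this (the GSSR_EEP_SM value is illustrative and the single global variable only stands in for the shared register):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GSSR_EEP_SM	0x0001	/* illustrative resource mask */

static uint32_t sw_fw_sync;	/* stand-in for the SW_FW_SYNC register */

/* Try to claim a resource for software; mirrors the non-timeout path. */
static bool swfw_try_acquire(uint16_t mask)
{
	uint32_t swmask = mask;
	uint32_t fwmask = (uint32_t)mask << 5;	/* FW bits sit 5 above SW bits */

	if (sw_fw_sync & (swmask | fwmask))
		return false;		/* someone else owns it; caller retries */

	sw_fw_sync |= swmask;
	return true;
}

static void swfw_release(uint16_t mask)
{
	sw_fw_sync &= ~(uint32_t)mask;
}

int main(void)
{
	printf("first acquire:  %s\n",
	       swfw_try_acquire(GSSR_EEP_SM) ? "ok" : "busy");
	printf("second acquire: %s\n",
	       swfw_try_acquire(GSSR_EEP_SM) ? "ok" : "busy");
	swfw_release(GSSR_EEP_SM);
	printf("after release:  %s\n",
	       swfw_try_acquire(GSSR_EEP_SM) ? "ok" : "busy");
	return 0;
}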
559/**
560 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
561 * @hw: pointer to hardware structure
562 * @mask: Mask to specify which semaphore to release
563 *
564 * Releases the SWFW semaphore through the SW_FW_SYNC register
565 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
566 **/
567static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
568{
569 u32 swfw_sync;
570 u32 swmask = mask;
571
572 ixgbe_get_swfw_sync_semaphore(hw);
573
574 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
575 swfw_sync &= ~swmask;
576 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
577
578 ixgbe_release_swfw_sync_semaphore(hw);
579 msleep(5);
580}
581
582/**
583 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
584 * @hw: pointer to hardware structure
585 *
586 * Sets the hardware semaphores so SW/FW can gain control of shared resources
587 **/
588static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
589{
590 s32 status = IXGBE_ERR_EEPROM;
591 u32 timeout = 2000;
592 u32 i;
593 u32 swsm;
594
595 /* Get SMBI software semaphore between device drivers first */
596 for (i = 0; i < timeout; i++) {
597 /*
598 * If the SMBI bit is 0 when we read it, then the bit will be
599 * set and we have the semaphore
600 */
601 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
602 if (!(swsm & IXGBE_SWSM_SMBI)) {
603 status = 0;
604 break;
605 }
606 udelay(50);
607 }
608
609 /* Now get the semaphore between SW/FW through the REGSMP bit */
610 	if (!status) {
611 for (i = 0; i < timeout; i++) {
612 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
613 if (!(swsm & IXGBE_SWFW_REGSMP))
614 break;
615
616 udelay(50);
617 }
618 } else {
619 hw_dbg(hw, "Software semaphore SMBI between device drivers "
620 "not granted.\n");
621 }
622
623 return status;
624}
625
626/**
627 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
628 * @hw: pointer to hardware structure
629 *
630 * This function clears hardware semaphore bits.
631 **/
632static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
633{
634 u32 swsm;
635
636 /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
637
638 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
639 swsm &= ~IXGBE_SWSM_SMBI;
640 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
641
642 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
643 swsm &= ~IXGBE_SWFW_REGSMP;
644 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
645
646 IXGBE_WRITE_FLUSH(hw);
647}
648
649static struct ixgbe_mac_operations mac_ops_X540 = {
650 .init_hw = &ixgbe_init_hw_generic,
651 .reset_hw = &ixgbe_reset_hw_X540,
652 .start_hw = &ixgbe_start_hw_generic,
653 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
654 .get_media_type = &ixgbe_get_media_type_X540,
655 .get_supported_physical_layer =
656 &ixgbe_get_supported_physical_layer_X540,
657 .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
658 .get_mac_addr = &ixgbe_get_mac_addr_generic,
659 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
660 .get_device_caps = NULL,
661 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
662 .stop_adapter = &ixgbe_stop_adapter_generic,
663 .get_bus_info = &ixgbe_get_bus_info_generic,
664 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
665 .read_analog_reg8 = NULL,
666 .write_analog_reg8 = NULL,
667 .setup_link = &ixgbe_setup_mac_link_X540,
668 .check_link = &ixgbe_check_mac_link_generic,
669 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
670 .led_on = &ixgbe_led_on_generic,
671 .led_off = &ixgbe_led_off_generic,
672 .blink_led_start = &ixgbe_blink_led_start_generic,
673 .blink_led_stop = &ixgbe_blink_led_stop_generic,
674 .set_rar = &ixgbe_set_rar_generic,
675 .clear_rar = &ixgbe_clear_rar_generic,
676 .set_vmdq = &ixgbe_set_vmdq_generic,
677 .clear_vmdq = &ixgbe_clear_vmdq_generic,
678 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
679 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
680 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
681 .enable_mc = &ixgbe_enable_mc_generic,
682 .disable_mc = &ixgbe_disable_mc_generic,
683 .clear_vfta = &ixgbe_clear_vfta_generic,
684 .set_vfta = &ixgbe_set_vfta_generic,
685 .fc_enable = &ixgbe_fc_enable_generic,
686 .init_uta_tables = &ixgbe_init_uta_tables_generic,
687 .setup_sfp = NULL,
688};
689
690static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
691 .init_params = &ixgbe_init_eeprom_params_X540,
692 .read = &ixgbe_read_eerd_X540,
693 .write = &ixgbe_write_eewr_X540,
694 .calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
695 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
696 .update_checksum = &ixgbe_update_eeprom_checksum_X540,
697};
698
699static struct ixgbe_phy_operations phy_ops_X540 = {
700 .identify = &ixgbe_identify_phy_generic,
701 .identify_sfp = &ixgbe_identify_sfp_module_generic,
702 .init = NULL,
703 .reset = &ixgbe_reset_phy_generic,
704 .read_reg = &ixgbe_read_phy_reg_generic,
705 .write_reg = &ixgbe_write_phy_reg_generic,
706 .setup_link = &ixgbe_setup_phy_link_generic,
707 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
708 .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
709 .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
710 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
711 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
712 .check_overtemp = &ixgbe_tn_check_overtemp,
713};
714
715struct ixgbe_info ixgbe_X540_info = {
716 .mac = ixgbe_mac_X540,
717 .get_invariants = &ixgbe_get_invariants_X540,
718 .mac_ops = &mac_ops_X540,
719 .eeprom_ops = &eeprom_ops_X540,
720 .phy_ops = &phy_ops_X540,
721 .mbx_ops = &mbx_ops_generic,
722};
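The three ops tables that close the file are what keeps the rest of the driver MAC-agnostic: core code only ever calls through hw->mac.ops, hw->eeprom.ops and hw->phy.ops, so X540 support is largely a matter of filling the tables with generic helpers plus the handful of X540-specific routines defined above. A minimal sketch of that dispatch pattern in plain C, with struct and function names invented for illustration rather than taken from ixgbe:

#include <stdio.h>

struct nic_hw;

struct nic_mac_ops {
	int (*reset_hw)(struct nic_hw *hw);
	int (*init_hw)(struct nic_hw *hw);
};

struct nic_hw {
	const char *name;
	const struct nic_mac_ops *mac_ops;
};

static int generic_init_hw(struct nic_hw *hw)
{
	printf("%s: generic init\n", hw->name);
	return 0;
}

static int x540_reset_hw(struct nic_hw *hw)
{
	printf("%s: X540-specific reset\n", hw->name);
	return 0;
}

/* Per-MAC table: generic helpers plus the device-specific overrides. */
static const struct nic_mac_ops x540_mac_ops = {
	.reset_hw = x540_reset_hw,
	.init_hw  = generic_init_hw,
};

int main(void)
{
	struct nic_hw hw = { .name = "x540", .mac_ops = &x540_mac_ops };

	/* Core code dispatches through the table, never by MAC type. */
	hw.mac_ops->reset_hw(&hw);
	hw.mac_ops->init_hw(&hw);
	return 0;
}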
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
index dd4e0d27e8c..1f35d229e71 100644
--- a/drivers/net/ixgbevf/Makefile
+++ b/drivers/net/ixgbevf/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel 82599 Virtual Function driver 3# Intel 82599 Virtual Function driver
4# Copyright(c) 1999 - 2009 Intel Corporation. 4# Copyright(c) 1999 - 2010 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index ca2c81f49a0..f8a807d606c 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index da4033c6efa..0cd6abcf930 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index dc03c965238..2216a3c8b12 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -51,9 +51,10 @@ char ixgbevf_driver_name[] = "ixgbevf";
51static const char ixgbevf_driver_string[] = 51static const char ixgbevf_driver_string[] =
52 "Intel(R) 82599 Virtual Function"; 52 "Intel(R) 82599 Virtual Function";
53 53
54#define DRV_VERSION "1.0.0-k0" 54#define DRV_VERSION "1.0.12-k0"
55const char ixgbevf_driver_version[] = DRV_VERSION; 55const char ixgbevf_driver_version[] = DRV_VERSION;
56static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 56static char ixgbevf_copyright[] =
57 "Copyright (c) 2009 - 2010 Intel Corporation.";
57 58
58static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 59static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
59 [board_82599_vf] = &ixgbevf_vf_info, 60 [board_82599_vf] = &ixgbevf_vf_info,
@@ -2488,10 +2489,9 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2488 int size; 2489 int size;
2489 2490
2490 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2491 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2491 tx_ring->tx_buffer_info = vmalloc(size); 2492 tx_ring->tx_buffer_info = vzalloc(size);
2492 if (!tx_ring->tx_buffer_info) 2493 if (!tx_ring->tx_buffer_info)
2493 goto err; 2494 goto err;
2494 memset(tx_ring->tx_buffer_info, 0, size);
2495 2495
2496 /* round up to nearest 4K */ 2496 /* round up to nearest 4K */
2497 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2497 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
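vzalloc() is vmalloc() plus zeroing, so the allocate-then-memset pair collapses into a single call and the conversion above changes no behaviour. A user-space analogue of the same simplification, using calloc() in place of vzalloc() and a trimmed-down, invented stand-in for the ring buffer element:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tx_buffer { void *skb; unsigned long time_stamp; };

int main(void)
{
	size_t count = 512;
	struct tx_buffer *old_way, *new_way;

	/* Old pattern: allocate, then clear in a second step. */
	old_way = malloc(count * sizeof(*old_way));
	if (old_way)
		memset(old_way, 0, count * sizeof(*old_way));

	/* New pattern: one call that returns zeroed memory. */
	new_way = calloc(count, sizeof(*new_way));

	printf("old %p, new %p, both zeroed\n",
	       (void *)old_way, (void *)new_way);
	free(old_way);
	free(new_way);
	return 0;
}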
@@ -2555,14 +2555,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2555 int size; 2555 int size;
2556 2556
2557 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2557 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2558 rx_ring->rx_buffer_info = vmalloc(size); 2558 rx_ring->rx_buffer_info = vzalloc(size);
2559 if (!rx_ring->rx_buffer_info) { 2559 if (!rx_ring->rx_buffer_info) {
2560 hw_dbg(&adapter->hw, 2560 hw_dbg(&adapter->hw,
2561 "Unable to vmalloc buffer memory for " 2561 "Unable to vmalloc buffer memory for "
2562 "the receive descriptor ring\n"); 2562 "the receive descriptor ring\n");
2563 goto alloc_failed; 2563 goto alloc_failed;
2564 } 2564 }
2565 memset(rx_ring->rx_buffer_info, 0, size);
2566 2565
2567 /* Round up to nearest 4K */ 2566 /* Round up to nearest 4K */
2568 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2567 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
@@ -3424,10 +3423,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3424 if (hw->mac.ops.get_bus_info) 3423 if (hw->mac.ops.get_bus_info)
3425 hw->mac.ops.get_bus_info(hw); 3424 hw->mac.ops.get_bus_info(hw);
3426 3425
3427
3428 netif_carrier_off(netdev);
3429 netif_tx_stop_all_queues(netdev);
3430
3431 strcpy(netdev->name, "eth%d"); 3426 strcpy(netdev->name, "eth%d");
3432 3427
3433 err = register_netdev(netdev); 3428 err = register_netdev(netdev);
@@ -3436,6 +3431,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3436 3431
3437 adapter->netdev_registered = true; 3432 adapter->netdev_registered = true;
3438 3433
3434 netif_carrier_off(netdev);
3435
3439 ixgbevf_init_last_counter_stats(adapter); 3436 ixgbevf_init_last_counter_stats(adapter);
3440 3437
3441 /* print the MAC address */ 3438 /* print the MAC address */
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
index 84ac486f4a6..7a883312577 100644
--- a/drivers/net/ixgbevf/mbx.c
+++ b/drivers/net/ixgbevf/mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
index 8c063bebee7..b2b5bf5daa3 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ixgbevf/mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
index 12f75960aec..fb80ca1bcc9 100644
--- a/drivers/net/ixgbevf/regs.h
+++ b/drivers/net/ixgbevf/regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index bfe42c1fcfa..971019d819b 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 61f9dc83142..144c99d5363 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index c57d9a43cec..2411e72ba57 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2076,12 +2076,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
2076 } 2076 }
2077 2077
2078 if (new_mtu > 1900) { 2078 if (new_mtu > 1900) {
2079 netdev->features &= ~(NETIF_F_HW_CSUM | 2079 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2080 NETIF_F_TSO | 2080 NETIF_F_TSO | NETIF_F_TSO6);
2081 NETIF_F_TSO6);
2082 } else { 2081 } else {
2083 if (test_bit(JME_FLAG_TXCSUM, &jme->flags)) 2082 if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
2084 netdev->features |= NETIF_F_HW_CSUM; 2083 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2085 if (test_bit(JME_FLAG_TSO, &jme->flags)) 2084 if (test_bit(JME_FLAG_TSO, &jme->flags))
2086 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; 2085 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2087 } 2086 }
@@ -2514,10 +2513,12 @@ jme_set_tx_csum(struct net_device *netdev, u32 on)
2514 if (on) { 2513 if (on) {
2515 set_bit(JME_FLAG_TXCSUM, &jme->flags); 2514 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2516 if (netdev->mtu <= 1900) 2515 if (netdev->mtu <= 1900)
2517 netdev->features |= NETIF_F_HW_CSUM; 2516 netdev->features |=
2517 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2518 } else { 2518 } else {
2519 clear_bit(JME_FLAG_TXCSUM, &jme->flags); 2519 clear_bit(JME_FLAG_TXCSUM, &jme->flags);
2520 netdev->features &= ~NETIF_F_HW_CSUM; 2520 netdev->features &=
2521 ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2521 } 2522 }
2522 2523
2523 return 0; 2524 return 0;
@@ -2797,7 +2798,8 @@ jme_init_one(struct pci_dev *pdev,
2797 netdev->netdev_ops = &jme_netdev_ops; 2798 netdev->netdev_ops = &jme_netdev_ops;
2798 netdev->ethtool_ops = &jme_ethtool_ops; 2799 netdev->ethtool_ops = &jme_ethtool_ops;
2799 netdev->watchdog_timeo = TX_TIMEOUT; 2800 netdev->watchdog_timeo = TX_TIMEOUT;
2800 netdev->features = NETIF_F_HW_CSUM | 2801 netdev->features = NETIF_F_IP_CSUM |
2802 NETIF_F_IPV6_CSUM |
2801 NETIF_F_SG | 2803 NETIF_F_SG |
2802 NETIF_F_TSO | 2804 NETIF_F_TSO |
2803 NETIF_F_TSO6 | 2805 NETIF_F_TSO6 |
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 51919fcd50c..0fa4a9887ba 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -1545,6 +1545,37 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
1545 1545
1546/* driver bus management functions */ 1546/* driver bus management functions */
1547 1547
1548#ifdef CONFIG_PM
1549static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
1550{
1551 struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
1552 struct net_device *dev = ks->netdev;
1553
1554 if (netif_running(dev)) {
1555 netif_device_detach(dev);
1556 ks8851_net_stop(dev);
1557 }
1558
1559 return 0;
1560}
1561
1562static int ks8851_resume(struct spi_device *spi)
1563{
1564 struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
1565 struct net_device *dev = ks->netdev;
1566
1567 if (netif_running(dev)) {
1568 ks8851_net_open(dev);
1569 netif_device_attach(dev);
1570 }
1571
1572 return 0;
1573}
1574#else
1575#define ks8851_suspend NULL
1576#define ks8851_resume NULL
1577#endif
1578
1548static int __devinit ks8851_probe(struct spi_device *spi) 1579static int __devinit ks8851_probe(struct spi_device *spi)
1549{ 1580{
1550 struct net_device *ndev; 1581 struct net_device *ndev;
@@ -1679,6 +1710,8 @@ static struct spi_driver ks8851_driver = {
1679 }, 1710 },
1680 .probe = ks8851_probe, 1711 .probe = ks8851_probe,
1681 .remove = __devexit_p(ks8851_remove), 1712 .remove = __devexit_p(ks8851_remove),
1713 .suspend = ks8851_suspend,
1714 .resume = ks8851_resume,
1682}; 1715};
1683 1716
1684static int __init ks8851_init(void) 1717static int __init ks8851_init(void)
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index f06296bfe29..02336edce74 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -207,7 +207,7 @@ tx_full and tbusy flags.
207#define LANCE_BUS_IF 0x16 207#define LANCE_BUS_IF 0x16
208#define LANCE_TOTAL_SIZE 0x18 208#define LANCE_TOTAL_SIZE 0x18
209 209
210#define TX_TIMEOUT 20 210#define TX_TIMEOUT (HZ/5)
211 211
212/* The LANCE Rx and Tx ring descriptors. */ 212/* The LANCE Rx and Tx ring descriptors. */
213struct lance_rx_head { 213struct lance_rx_head {
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index c27f4291b35..9e042894479 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -161,7 +161,7 @@ enum commands {
161#define RX_SUSPEND 0x0030 161#define RX_SUSPEND 0x0030
162#define RX_ABORT 0x0040 162#define RX_ABORT 0x0040
163 163
164#define TX_TIMEOUT 5 164#define TX_TIMEOUT (HZ/20)
165 165
166 166
167struct i596_reg { 167struct i596_reg {
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index e7030ceb178..da74db4a03d 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -203,7 +203,7 @@ static void __NS8390_init(struct net_device *dev, int startp);
203static int __ei_open(struct net_device *dev) 203static int __ei_open(struct net_device *dev)
204{ 204{
205 unsigned long flags; 205 unsigned long flags;
206 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 206 struct ei_device *ei_local = netdev_priv(dev);
207 207
208 if (dev->watchdog_timeo <= 0) 208 if (dev->watchdog_timeo <= 0)
209 dev->watchdog_timeo = TX_TIMEOUT; 209 dev->watchdog_timeo = TX_TIMEOUT;
@@ -231,7 +231,7 @@ static int __ei_open(struct net_device *dev)
231 */ 231 */
232static int __ei_close(struct net_device *dev) 232static int __ei_close(struct net_device *dev)
233{ 233{
234 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 234 struct ei_device *ei_local = netdev_priv(dev);
235 unsigned long flags; 235 unsigned long flags;
236 236
237 /* 237 /*
@@ -256,7 +256,7 @@ static int __ei_close(struct net_device *dev)
256static void __ei_tx_timeout(struct net_device *dev) 256static void __ei_tx_timeout(struct net_device *dev)
257{ 257{
258 unsigned long e8390_base = dev->base_addr; 258 unsigned long e8390_base = dev->base_addr;
259 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 259 struct ei_device *ei_local = netdev_priv(dev);
260 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); 260 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
261 unsigned long flags; 261 unsigned long flags;
262 262
@@ -303,7 +303,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
303 struct net_device *dev) 303 struct net_device *dev)
304{ 304{
305 unsigned long e8390_base = dev->base_addr; 305 unsigned long e8390_base = dev->base_addr;
306 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 306 struct ei_device *ei_local = netdev_priv(dev);
307 int send_length = skb->len, output_page; 307 int send_length = skb->len, output_page;
308 unsigned long flags; 308 unsigned long flags;
309 char buf[ETH_ZLEN]; 309 char buf[ETH_ZLEN];
@@ -592,7 +592,7 @@ static void ei_tx_err(struct net_device *dev)
592static void ei_tx_intr(struct net_device *dev) 592static void ei_tx_intr(struct net_device *dev)
593{ 593{
594 unsigned long e8390_base = dev->base_addr; 594 unsigned long e8390_base = dev->base_addr;
595 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 595 struct ei_device *ei_local = netdev_priv(dev);
596 int status = ei_inb(e8390_base + EN0_TSR); 596 int status = ei_inb(e8390_base + EN0_TSR);
597 597
598 ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */ 598 ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
@@ -675,7 +675,7 @@ static void ei_tx_intr(struct net_device *dev)
675static void ei_receive(struct net_device *dev) 675static void ei_receive(struct net_device *dev)
676{ 676{
677 unsigned long e8390_base = dev->base_addr; 677 unsigned long e8390_base = dev->base_addr;
678 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 678 struct ei_device *ei_local = netdev_priv(dev);
679 unsigned char rxing_page, this_frame, next_frame; 679 unsigned char rxing_page, this_frame, next_frame;
680 unsigned short current_offset; 680 unsigned short current_offset;
681 int rx_pkt_count = 0; 681 int rx_pkt_count = 0;
@@ -879,7 +879,7 @@ static void ei_rx_overrun(struct net_device *dev)
879static struct net_device_stats *__ei_get_stats(struct net_device *dev) 879static struct net_device_stats *__ei_get_stats(struct net_device *dev)
880{ 880{
881 unsigned long ioaddr = dev->base_addr; 881 unsigned long ioaddr = dev->base_addr;
882 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 882 struct ei_device *ei_local = netdev_priv(dev);
883 unsigned long flags; 883 unsigned long flags;
884 884
885 /* If the card is stopped, just return the present stats. */ 885 /* If the card is stopped, just return the present stats. */
@@ -927,7 +927,7 @@ static void do_set_multicast_list(struct net_device *dev)
927{ 927{
928 unsigned long e8390_base = dev->base_addr; 928 unsigned long e8390_base = dev->base_addr;
929 int i; 929 int i;
930 struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); 930 struct ei_device *ei_local = netdev_priv(dev);
931 931
932 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) 932 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
933 { 933 {
@@ -981,7 +981,7 @@ static void do_set_multicast_list(struct net_device *dev)
981static void __ei_set_multicast_list(struct net_device *dev) 981static void __ei_set_multicast_list(struct net_device *dev)
982{ 982{
983 unsigned long flags; 983 unsigned long flags;
984 struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); 984 struct ei_device *ei_local = netdev_priv(dev);
985 985
986 spin_lock_irqsave(&ei_local->page_lock, flags); 986 spin_lock_irqsave(&ei_local->page_lock, flags);
987 do_set_multicast_list(dev); 987 do_set_multicast_list(dev);
@@ -998,7 +998,7 @@ static void __ei_set_multicast_list(struct net_device *dev)
998 998
999static void ethdev_setup(struct net_device *dev) 999static void ethdev_setup(struct net_device *dev)
1000{ 1000{
1001 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1001 struct ei_device *ei_local = netdev_priv(dev);
1002 if (ei_debug > 1) 1002 if (ei_debug > 1)
1003 printk(version); 1003 printk(version);
1004 1004
@@ -1036,7 +1036,7 @@ static struct net_device *____alloc_ei_netdev(int size)
1036static void __NS8390_init(struct net_device *dev, int startp) 1036static void __NS8390_init(struct net_device *dev, int startp)
1037{ 1037{
1038 unsigned long e8390_base = dev->base_addr; 1038 unsigned long e8390_base = dev->base_addr;
1039 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1039 struct ei_device *ei_local = netdev_priv(dev);
1040 int i; 1040 int i;
1041 int endcfg = ei_local->word16 1041 int endcfg = ei_local->word16
1042 ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0)) 1042 ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
@@ -1099,7 +1099,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1099 int start_page) 1099 int start_page)
1100{ 1100{
1101 unsigned long e8390_base = dev->base_addr; 1101 unsigned long e8390_base = dev->base_addr;
1102 struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev); 1102 struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
1103 1103
1104 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); 1104 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1105 1105
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0fc9dc7f20d..6ed577b065d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -38,6 +38,7 @@ struct macvlan_port {
38 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; 38 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
39 struct list_head vlans; 39 struct list_head vlans;
40 struct rcu_head rcu; 40 struct rcu_head rcu;
41 bool passthru;
41}; 42};
42 43
43#define macvlan_port_get_rcu(dev) \ 44#define macvlan_port_get_rcu(dev) \
@@ -169,6 +170,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
169 macvlan_broadcast(skb, port, NULL, 170 macvlan_broadcast(skb, port, NULL,
170 MACVLAN_MODE_PRIVATE | 171 MACVLAN_MODE_PRIVATE |
171 MACVLAN_MODE_VEPA | 172 MACVLAN_MODE_VEPA |
173 MACVLAN_MODE_PASSTHRU|
172 MACVLAN_MODE_BRIDGE); 174 MACVLAN_MODE_BRIDGE);
173 else if (src->mode == MACVLAN_MODE_VEPA) 175 else if (src->mode == MACVLAN_MODE_VEPA)
174 /* flood to everyone except source */ 176 /* flood to everyone except source */
@@ -185,7 +187,10 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
185 return skb; 187 return skb;
186 } 188 }
187 189
188 vlan = macvlan_hash_lookup(port, eth->h_dest); 190 if (port->passthru)
191 vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
192 else
193 vlan = macvlan_hash_lookup(port, eth->h_dest);
189 if (vlan == NULL) 194 if (vlan == NULL)
190 return skb; 195 return skb;
191 196
@@ -243,18 +248,22 @@ xmit_world:
243netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, 248netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
244 struct net_device *dev) 249 struct net_device *dev)
245{ 250{
246 int i = skb_get_queue_mapping(skb);
247 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
248 unsigned int len = skb->len; 251 unsigned int len = skb->len;
249 int ret; 252 int ret;
253 const struct macvlan_dev *vlan = netdev_priv(dev);
250 254
251 ret = macvlan_queue_xmit(skb, dev); 255 ret = macvlan_queue_xmit(skb, dev);
252 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 256 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
253 txq->tx_packets++; 257 struct macvlan_pcpu_stats *pcpu_stats;
254 txq->tx_bytes += len;
255 } else
256 txq->tx_dropped++;
257 258
259 pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
260 u64_stats_update_begin(&pcpu_stats->syncp);
261 pcpu_stats->tx_packets++;
262 pcpu_stats->tx_bytes += len;
263 u64_stats_update_end(&pcpu_stats->syncp);
264 } else {
265 this_cpu_inc(vlan->pcpu_stats->tx_dropped);
266 }
258 return ret; 267 return ret;
259} 268}
260EXPORT_SYMBOL_GPL(macvlan_start_xmit); 269EXPORT_SYMBOL_GPL(macvlan_start_xmit);
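The rewritten transmit path stops charging the shared netdev_queue counters and instead bumps per-CPU counters inside a u64_stats sequence, which fits with the NETIF_F_LLTX flag added later in this patch (the core no longer serializes transmits for this device). The reader side, shown further down in macvlan_dev_get_stats64(), folds every CPU's copy into one view inside a fetch/retry loop. A user-space model of that writer/reader split, with per-CPU storage reduced to an array indexed by a fake CPU id and the u64_stats fetch/retry machinery omitted:

#include <stdint.h>
#include <stdio.h>

#define NR_FAKE_CPUS 4

struct pcpu_tx_stats {
	uint64_t tx_packets;
	uint64_t tx_bytes;
	uint32_t tx_dropped;	/* u32 in the driver, updated outside the syncp */
};

static struct pcpu_tx_stats stats[NR_FAKE_CPUS];

/* Writer: each "CPU" only ever touches its own slot, so no lock is needed. */
static void tx_account(int cpu, unsigned int len, int ok)
{
	if (ok) {
		stats[cpu].tx_packets++;
		stats[cpu].tx_bytes += len;
	} else {
		stats[cpu].tx_dropped++;
	}
}

/* Reader: fold every CPU's copy into one view, as get_stats64 does. */
static void fold_stats(uint64_t *packets, uint64_t *bytes, uint32_t *dropped)
{
	int cpu;

	*packets = *bytes = 0;
	*dropped = 0;
	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++) {
		*packets += stats[cpu].tx_packets;
		*bytes += stats[cpu].tx_bytes;
		*dropped += stats[cpu].tx_dropped;
	}
}

int main(void)
{
	uint64_t packets, bytes;
	uint32_t dropped;

	tx_account(0, 1500, 1);
	tx_account(1, 60, 1);
	tx_account(2, 9000, 0);
	fold_stats(&packets, &bytes, &dropped);
	printf("tx %llu packets, %llu bytes, %u dropped\n",
	       (unsigned long long)packets, (unsigned long long)bytes, dropped);
	return 0;
}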
@@ -284,6 +293,11 @@ static int macvlan_open(struct net_device *dev)
284 struct net_device *lowerdev = vlan->lowerdev; 293 struct net_device *lowerdev = vlan->lowerdev;
285 int err; 294 int err;
286 295
296 if (vlan->port->passthru) {
297 dev_set_promiscuity(lowerdev, 1);
298 goto hash_add;
299 }
300
287 err = -EBUSY; 301 err = -EBUSY;
288 if (macvlan_addr_busy(vlan->port, dev->dev_addr)) 302 if (macvlan_addr_busy(vlan->port, dev->dev_addr))
289 goto out; 303 goto out;
@@ -296,6 +310,8 @@ static int macvlan_open(struct net_device *dev)
296 if (err < 0) 310 if (err < 0)
297 goto del_unicast; 311 goto del_unicast;
298 } 312 }
313
314hash_add:
299 macvlan_hash_add(vlan); 315 macvlan_hash_add(vlan);
300 return 0; 316 return 0;
301 317
@@ -310,12 +326,18 @@ static int macvlan_stop(struct net_device *dev)
310 struct macvlan_dev *vlan = netdev_priv(dev); 326 struct macvlan_dev *vlan = netdev_priv(dev);
311 struct net_device *lowerdev = vlan->lowerdev; 327 struct net_device *lowerdev = vlan->lowerdev;
312 328
329 if (vlan->port->passthru) {
330 dev_set_promiscuity(lowerdev, -1);
331 goto hash_del;
332 }
333
313 dev_mc_unsync(lowerdev, dev); 334 dev_mc_unsync(lowerdev, dev);
314 if (dev->flags & IFF_ALLMULTI) 335 if (dev->flags & IFF_ALLMULTI)
315 dev_set_allmulti(lowerdev, -1); 336 dev_set_allmulti(lowerdev, -1);
316 337
317 dev_uc_del(lowerdev, dev->dev_addr); 338 dev_uc_del(lowerdev, dev->dev_addr);
318 339
340hash_del:
319 macvlan_hash_del(vlan); 341 macvlan_hash_del(vlan);
320 return 0; 342 return 0;
321} 343}
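In passthru mode the macvlan no longer programs its own unicast address on the lower device; open simply bumps the lower device's promiscuity count and stop drops it again, so the two calls must stay balanced. A stripped-down sketch of just that pairing, with invented helper names (the real code also keeps the hash_add/hash_del bookkeeping shown above):

#include <linux/netdevice.h>

static int example_passthru_open(struct net_device *lowerdev)
{
        return dev_set_promiscuity(lowerdev, 1);        /* +1 on open */
}

static void example_passthru_stop(struct net_device *lowerdev)
{
        dev_set_promiscuity(lowerdev, -1);              /* -1 on stop, keeping the count balanced */
}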
@@ -414,14 +436,15 @@ static int macvlan_init(struct net_device *dev)
414 dev->state = (dev->state & ~MACVLAN_STATE_MASK) | 436 dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
415 (lowerdev->state & MACVLAN_STATE_MASK); 437 (lowerdev->state & MACVLAN_STATE_MASK);
416 dev->features = lowerdev->features & MACVLAN_FEATURES; 438 dev->features = lowerdev->features & MACVLAN_FEATURES;
439 dev->features |= NETIF_F_LLTX;
417 dev->gso_max_size = lowerdev->gso_max_size; 440 dev->gso_max_size = lowerdev->gso_max_size;
418 dev->iflink = lowerdev->ifindex; 441 dev->iflink = lowerdev->ifindex;
419 dev->hard_header_len = lowerdev->hard_header_len; 442 dev->hard_header_len = lowerdev->hard_header_len;
420 443
421 macvlan_set_lockdep_class(dev); 444 macvlan_set_lockdep_class(dev);
422 445
423 vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats); 446 vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
424 if (!vlan->rx_stats) 447 if (!vlan->pcpu_stats)
425 return -ENOMEM; 448 return -ENOMEM;
426 449
427 return 0; 450 return 0;
@@ -431,7 +454,7 @@ static void macvlan_uninit(struct net_device *dev)
431{ 454{
432 struct macvlan_dev *vlan = netdev_priv(dev); 455 struct macvlan_dev *vlan = netdev_priv(dev);
433 456
434 free_percpu(vlan->rx_stats); 457 free_percpu(vlan->pcpu_stats);
435} 458}
436 459
437static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, 460static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -439,33 +462,38 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
439{ 462{
440 struct macvlan_dev *vlan = netdev_priv(dev); 463 struct macvlan_dev *vlan = netdev_priv(dev);
441 464
442 dev_txq_stats_fold(dev, stats); 465 if (vlan->pcpu_stats) {
443 466 struct macvlan_pcpu_stats *p;
444 if (vlan->rx_stats) { 467 u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
445 struct macvlan_rx_stats *p, accum = {0}; 468 u32 rx_errors = 0, tx_dropped = 0;
446 u64 rx_packets, rx_bytes, rx_multicast;
447 unsigned int start; 469 unsigned int start;
448 int i; 470 int i;
449 471
450 for_each_possible_cpu(i) { 472 for_each_possible_cpu(i) {
451 p = per_cpu_ptr(vlan->rx_stats, i); 473 p = per_cpu_ptr(vlan->pcpu_stats, i);
452 do { 474 do {
453 start = u64_stats_fetch_begin_bh(&p->syncp); 475 start = u64_stats_fetch_begin_bh(&p->syncp);
454 rx_packets = p->rx_packets; 476 rx_packets = p->rx_packets;
455 rx_bytes = p->rx_bytes; 477 rx_bytes = p->rx_bytes;
456 rx_multicast = p->rx_multicast; 478 rx_multicast = p->rx_multicast;
479 tx_packets = p->tx_packets;
480 tx_bytes = p->tx_bytes;
457 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 481 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
458 accum.rx_packets += rx_packets; 482
459 accum.rx_bytes += rx_bytes; 483 stats->rx_packets += rx_packets;
460 accum.rx_multicast += rx_multicast; 484 stats->rx_bytes += rx_bytes;
461 /* rx_errors is an ulong, updated without syncp protection */ 485 stats->multicast += rx_multicast;
462 accum.rx_errors += p->rx_errors; 486 stats->tx_packets += tx_packets;
487 stats->tx_bytes += tx_bytes;
488 /* rx_errors & tx_dropped are u32, updated
489 * without syncp protection.
490 */
491 rx_errors += p->rx_errors;
492 tx_dropped += p->tx_dropped;
463 } 493 }
464 stats->rx_packets = accum.rx_packets; 494 stats->rx_errors = rx_errors;
465 stats->rx_bytes = accum.rx_bytes; 495 stats->rx_dropped = rx_errors;
466 stats->rx_errors = accum.rx_errors; 496 stats->tx_dropped = tx_dropped;
467 stats->rx_dropped = accum.rx_errors;
468 stats->multicast = accum.rx_multicast;
469 } 497 }
470 return stats; 498 return stats;
471} 499}
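The get_stats64 hunk above folds those per-CPU counters back into one rtnl_link_stats64. Untangling the side-by-side rendering, the reader loop has roughly the following shape (reusing struct example_pcpu_stats from the earlier sketch and assuming <linux/netdevice.h> for rtnl_link_stats64): the fetch_begin/fetch_retry pair re-reads a CPU's snapshot if a writer was mid-update, while the u32 drop counter is summed without that protection.

static void example_fold_stats(struct example_pcpu_stats __percpu *stats,
                               struct rtnl_link_stats64 *out)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct example_pcpu_stats *p = per_cpu_ptr(stats, cpu);
                u64 packets, bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_bh(&p->syncp);
                        packets = p->tx_packets;
                        bytes   = p->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&p->syncp, start));

                out->tx_packets += packets;
                out->tx_bytes   += bytes;
                out->tx_dropped += p->tx_dropped;       /* u32, read as-is */
        }
}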
@@ -549,6 +577,7 @@ static int macvlan_port_create(struct net_device *dev)
549 if (port == NULL) 577 if (port == NULL)
550 return -ENOMEM; 578 return -ENOMEM;
551 579
580 port->passthru = false;
552 port->dev = dev; 581 port->dev = dev;
553 INIT_LIST_HEAD(&port->vlans); 582 INIT_LIST_HEAD(&port->vlans);
554 for (i = 0; i < MACVLAN_HASH_SIZE; i++) 583 for (i = 0; i < MACVLAN_HASH_SIZE; i++)
@@ -593,6 +622,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
593 case MACVLAN_MODE_PRIVATE: 622 case MACVLAN_MODE_PRIVATE:
594 case MACVLAN_MODE_VEPA: 623 case MACVLAN_MODE_VEPA:
595 case MACVLAN_MODE_BRIDGE: 624 case MACVLAN_MODE_BRIDGE:
625 case MACVLAN_MODE_PASSTHRU:
596 break; 626 break;
597 default: 627 default:
598 return -EINVAL; 628 return -EINVAL;
@@ -601,25 +631,6 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
601 return 0; 631 return 0;
602} 632}
603 633
604static int macvlan_get_tx_queues(struct net *net,
605 struct nlattr *tb[],
606 unsigned int *num_tx_queues,
607 unsigned int *real_num_tx_queues)
608{
609 struct net_device *real_dev;
610
611 if (!tb[IFLA_LINK])
612 return -EINVAL;
613
614 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
615 if (!real_dev)
616 return -ENODEV;
617
618 *num_tx_queues = real_dev->num_tx_queues;
619 *real_num_tx_queues = real_dev->real_num_tx_queues;
620 return 0;
621}
622
623int macvlan_common_newlink(struct net *src_net, struct net_device *dev, 634int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
624 struct nlattr *tb[], struct nlattr *data[], 635 struct nlattr *tb[], struct nlattr *data[],
625 int (*receive)(struct sk_buff *skb), 636 int (*receive)(struct sk_buff *skb),
@@ -661,6 +672,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
661 } 672 }
662 port = macvlan_port_get(lowerdev); 673 port = macvlan_port_get(lowerdev);
663 674
675 /* Only 1 macvlan device can be created in passthru mode */
676 if (port->passthru)
677 return -EINVAL;
678
664 vlan->lowerdev = lowerdev; 679 vlan->lowerdev = lowerdev;
665 vlan->dev = dev; 680 vlan->dev = dev;
666 vlan->port = port; 681 vlan->port = port;
@@ -671,6 +686,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
671 if (data && data[IFLA_MACVLAN_MODE]) 686 if (data && data[IFLA_MACVLAN_MODE])
672 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 687 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
673 688
689 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
690 if (!list_empty(&port->vlans))
691 return -EINVAL;
692 port->passthru = true;
693 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
694 }
695
674 err = register_netdevice(dev); 696 err = register_netdevice(dev);
675 if (err < 0) 697 if (err < 0)
676 goto destroy_port; 698 goto destroy_port;
@@ -743,7 +765,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
743{ 765{
744 /* common fields */ 766 /* common fields */
745 ops->priv_size = sizeof(struct macvlan_dev); 767 ops->priv_size = sizeof(struct macvlan_dev);
746 ops->get_tx_queues = macvlan_get_tx_queues;
747 ops->validate = macvlan_validate; 768 ops->validate = macvlan_validate;
748 ops->maxtype = IFLA_MACVLAN_MAX; 769 ops->maxtype = IFLA_MACVLAN_MAX;
749 ops->policy = macvlan_policy; 770 ops->policy = macvlan_policy;
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index e0b0ef11f11..30be8c634eb 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -86,7 +86,7 @@ static u32 reg_offset[16];
86 86
87static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr) 87static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
88{ 88{
89 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 89 struct ei_device *ei_local = netdev_priv(dev);
90 int i; 90 int i;
91 unsigned char bus_width; 91 unsigned char bus_width;
92 92
@@ -218,7 +218,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
218 int start_page, stop_page; 218 int start_page, stop_page;
219 int reg0, ret; 219 int reg0, ret;
220 static unsigned version_printed; 220 static unsigned version_printed;
221 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 221 struct ei_device *ei_local = netdev_priv(dev);
222 unsigned char bus_width; 222 unsigned char bus_width;
223 223
224 if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) 224 if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
@@ -371,7 +371,7 @@ static int ne_close(struct net_device *dev)
371static void ne_reset_8390(struct net_device *dev) 371static void ne_reset_8390(struct net_device *dev)
372{ 372{
373 unsigned long reset_start_time = jiffies; 373 unsigned long reset_start_time = jiffies;
374 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 374 struct ei_device *ei_local = netdev_priv(dev);
375 375
376 if (ei_debug > 1) 376 if (ei_debug > 1)
377 printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); 377 printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
@@ -397,7 +397,7 @@ static void ne_reset_8390(struct net_device *dev)
397 397
398static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) 398static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
399{ 399{
400 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 400 struct ei_device *ei_local = netdev_priv(dev);
401 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 401 /* This *shouldn't* happen. If it does, it's the last thing you'll see */
402 402
403 if (ei_status.dmaing) 403 if (ei_status.dmaing)
@@ -437,7 +437,7 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
437 437
438static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) 438static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
439{ 439{
440 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 440 struct ei_device *ei_local = netdev_priv(dev);
441#ifdef NE_SANITY_CHECK 441#ifdef NE_SANITY_CHECK
442 int xfer_count = count; 442 int xfer_count = count;
443#endif 443#endif
@@ -507,7 +507,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
507static void ne_block_output(struct net_device *dev, int count, 507static void ne_block_output(struct net_device *dev, int count,
508 const unsigned char *buf, const int start_page) 508 const unsigned char *buf, const int start_page)
509{ 509{
510 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 510 struct ei_device *ei_local = netdev_priv(dev);
511 unsigned long dma_start; 511 unsigned long dma_start;
512#ifdef NE_SANITY_CHECK 512#ifdef NE_SANITY_CHECK
513 int retries = 0; 513 int retries = 0;
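All of the ne-h8300 changes (and, further down, the axnet_cs and qla3xxx ones) are the same cleanup: netdev_priv() already returns void *, so casting its result is redundant. A two-line illustration, with an invented wrapper name and only a forward declaration of the 8390 private struct:

#include <linux/netdevice.h>

struct ei_device;       /* real definition lives in the 8390 driver headers */

static inline struct ei_device *example_ei_priv(struct net_device *dev)
{
        /* netdev_priv() returns void *, which converts implicitly in C;
         * the old "(struct ei_device *)" cast added nothing. */
        return netdev_priv(dev);
}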
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 95fe552aa27..731077d8d96 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -214,13 +214,12 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
214 tx_ring->num_desc = adapter->num_txd; 214 tx_ring->num_desc = adapter->num_txd;
215 tx_ring->txq = netdev_get_tx_queue(netdev, 0); 215 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
216 216
217 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); 217 cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
218 if (cmd_buf_arr == NULL) { 218 if (cmd_buf_arr == NULL) {
219 dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n", 219 dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
220 netdev->name); 220 netdev->name);
221 goto err_out; 221 goto err_out;
222 } 222 }
223 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
224 tx_ring->cmd_buf_arr = cmd_buf_arr; 223 tx_ring->cmd_buf_arr = cmd_buf_arr;
225 224
226 recv_ctx = &adapter->recv_ctx; 225 recv_ctx = &adapter->recv_ctx;
@@ -279,8 +278,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
279 break; 278 break;
280 279
281 } 280 }
282 rds_ring->rx_buf_arr = (struct netxen_rx_buffer *) 281 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
283 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
284 if (rds_ring->rx_buf_arr == NULL) { 282 if (rds_ring->rx_buf_arr == NULL) {
285 printk(KERN_ERR "%s: Failed to allocate " 283 printk(KERN_ERR "%s: Failed to allocate "
286 "rx buffer ring %d\n", 284 "rx buffer ring %d\n",
@@ -288,7 +286,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
288 /* free whatever was already allocated */ 286 /* free whatever was already allocated */
289 goto err_out; 287 goto err_out;
290 } 288 }
291 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
292 INIT_LIST_HEAD(&rds_ring->free_list); 289 INIT_LIST_HEAD(&rds_ring->free_list);
293 /* 290 /*
294 * Now go through all of them, set reference handles 291 * Now go through all of them, set reference handles
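The netxen hunks (and the later pch_gbe, pptp and qlcnic ones) are one mechanical conversion: vzalloc() returns zeroed virtually-contiguous memory, so the separate memset() after vmalloc() can go away. A before/after sketch with invented helper names:

#include <linux/vmalloc.h>
#include <linux/string.h>

static void *example_alloc_zeroed_old(size_t size)
{
        void *buf = vmalloc(size);      /* old pattern: allocate, then clear by hand */

        if (buf)
                memset(buf, 0, size);
        return buf;
}

static void *example_alloc_zeroed_new(size_t size)
{
        return vzalloc(size);           /* new pattern: one call, already zeroed, NULL on failure */
}

Besides being shorter, the single call avoids the classic bug of forgetting the memset at one of several allocation sites.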
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index e1d30d7f207..ceeaac989df 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1277,6 +1277,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1277 int i = 0, err; 1277 int i = 0, err;
1278 int pci_func_id = PCI_FUNC(pdev->devfn); 1278 int pci_func_id = PCI_FUNC(pdev->devfn);
1279 uint8_t revision_id; 1279 uint8_t revision_id;
1280 u32 val;
1280 1281
1281 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { 1282 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
1282 pr_warning("%s: chip revisions between 0x%x-0x%x " 1283 pr_warning("%s: chip revisions between 0x%x-0x%x "
@@ -1352,8 +1353,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1352 break; 1353 break;
1353 } 1354 }
1354 1355
1355 if (reset_devices) { 1356 if (adapter->portnum == 0) {
1356 if (adapter->portnum == 0) { 1357 val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
1358 if (val != 0xffffffff && val != 0) {
1357 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); 1359 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
1358 adapter->need_fw_reset = 1; 1360 adapter->need_fw_reset = 1;
1359 } 1361 }
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c
index c8cc32c0edc..c8c873b31a8 100644
--- a/drivers/net/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c
@@ -469,18 +469,6 @@ static int pch_gbe_set_rx_csum(struct net_device *netdev, u32 data)
469} 469}
470 470
471/** 471/**
472 * pch_gbe_get_tx_csum - Report whether transmit checksums are turned on or off
473 * @netdev: Network interface device structure
474 * Returns
475 * true(1): Checksum On
476 * false(0): Checksum Off
477 */
478static u32 pch_gbe_get_tx_csum(struct net_device *netdev)
479{
480 return (netdev->features & NETIF_F_HW_CSUM) != 0;
481}
482
483/**
484 * pch_gbe_set_tx_csum - Turn transmit checksums on or off 472 * pch_gbe_set_tx_csum - Turn transmit checksums on or off
485 * @netdev: Network interface device structure 473 * @netdev: Network interface device structure
486 * @data: Checksum on[true] or off[false] 474 * @data: Checksum on[true] or off[false]
@@ -493,11 +481,7 @@ static int pch_gbe_set_tx_csum(struct net_device *netdev, u32 data)
493 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 481 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
494 482
495 adapter->tx_csum = data; 483 adapter->tx_csum = data;
496 if (data) 484 return ethtool_op_set_tx_ipv6_csum(netdev, data);
497 netdev->features |= NETIF_F_HW_CSUM;
498 else
499 netdev->features &= ~NETIF_F_HW_CSUM;
500 return 0;
501} 485}
502 486
503/** 487/**
@@ -572,7 +556,6 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
572 .set_pauseparam = pch_gbe_set_pauseparam, 556 .set_pauseparam = pch_gbe_set_pauseparam,
573 .get_rx_csum = pch_gbe_get_rx_csum, 557 .get_rx_csum = pch_gbe_get_rx_csum,
574 .set_rx_csum = pch_gbe_set_rx_csum, 558 .set_rx_csum = pch_gbe_set_rx_csum,
575 .get_tx_csum = pch_gbe_get_tx_csum,
576 .set_tx_csum = pch_gbe_set_tx_csum, 559 .set_tx_csum = pch_gbe_set_tx_csum,
577 .get_strings = pch_gbe_get_strings, 560 .get_strings = pch_gbe_get_strings,
578 .get_ethtool_stats = pch_gbe_get_ethtool_stats, 561 .get_ethtool_stats = pch_gbe_get_ethtool_stats,
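The pch_gbe checksum rework swaps the catch-all NETIF_F_HW_CSUM flag (which claims the hardware can checksum arbitrary protocols) for the protocol-specific NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM pair, presumably what this MAC actually supports, and lets the generic ethtool helper flip them. A hedged sketch of the feature-flag handling only, not the driver's real entry point:

#include <linux/netdevice.h>

static void example_set_tx_csum(struct net_device *netdev, bool enable)
{
        const unsigned long csum = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        if (enable)
                netdev->features |= csum;       /* advertise IPv4 + IPv6 TCP/UDP checksum offload */
        else
                netdev->features &= ~csum;
}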
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 03a1d280105..d7355306a73 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1523,12 +1523,11 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1523 int desNo; 1523 int desNo;
1524 1524
1525 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count; 1525 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1526 tx_ring->buffer_info = vmalloc(size); 1526 tx_ring->buffer_info = vzalloc(size);
1527 if (!tx_ring->buffer_info) { 1527 if (!tx_ring->buffer_info) {
1528 pr_err("Unable to allocate memory for the buffer infomation\n"); 1528 pr_err("Unable to allocate memory for the buffer infomation\n");
1529 return -ENOMEM; 1529 return -ENOMEM;
1530 } 1530 }
1531 memset(tx_ring->buffer_info, 0, size);
1532 1531
1533 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1532 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1534 1533
@@ -1573,12 +1572,11 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1573 int desNo; 1572 int desNo;
1574 1573
1575 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count; 1574 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1576 rx_ring->buffer_info = vmalloc(size); 1575 rx_ring->buffer_info = vzalloc(size);
1577 if (!rx_ring->buffer_info) { 1576 if (!rx_ring->buffer_info) {
1578 pr_err("Unable to allocate memory for the receive descriptor ring\n"); 1577 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1579 return -ENOMEM; 1578 return -ENOMEM;
1580 } 1579 }
1581 memset(rx_ring->buffer_info, 0, size);
1582 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1580 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1583 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1581 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1584 &rx_ring->dma, GFP_KERNEL); 1582 &rx_ring->dma, GFP_KERNEL);
@@ -2321,7 +2319,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2321 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; 2319 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2322 netif_napi_add(netdev, &adapter->napi, 2320 netif_napi_add(netdev, &adapter->napi,
2323 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT); 2321 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2324 netdev->features = NETIF_F_HW_CSUM | NETIF_F_GRO; 2322 netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
2325 pch_gbe_set_ethtool_ops(netdev); 2323 pch_gbe_set_ethtool_ops(netdev);
2326 2324
2327 pch_gbe_mac_reset_hw(&adapter->hw); 2325 pch_gbe_mac_reset_hw(&adapter->hw);
@@ -2360,9 +2358,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2360 pch_gbe_check_options(adapter); 2358 pch_gbe_check_options(adapter);
2361 2359
2362 if (adapter->tx_csum) 2360 if (adapter->tx_csum)
2363 netdev->features |= NETIF_F_HW_CSUM; 2361 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2364 else 2362 else
2365 netdev->features &= ~NETIF_F_HW_CSUM; 2363 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2366 2364
2367 /* initialize the wol settings based on the eeprom settings */ 2365 /* initialize the wol settings based on the eeprom settings */
2368 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING; 2366 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 8a4d19e5de0..1a0eb128e60 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -875,7 +875,7 @@ static void do_set_multicast_list(struct net_device *dev);
875static int ax_open(struct net_device *dev) 875static int ax_open(struct net_device *dev)
876{ 876{
877 unsigned long flags; 877 unsigned long flags;
878 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 878 struct ei_device *ei_local = netdev_priv(dev);
879 879
880 /* 880 /*
881 * Grab the page lock so we own the register set, then call 881 * Grab the page lock so we own the register set, then call
@@ -926,7 +926,7 @@ static int ax_close(struct net_device *dev)
926static void axnet_tx_timeout(struct net_device *dev) 926static void axnet_tx_timeout(struct net_device *dev)
927{ 927{
928 long e8390_base = dev->base_addr; 928 long e8390_base = dev->base_addr;
929 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 929 struct ei_device *ei_local = netdev_priv(dev);
930 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); 930 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
931 unsigned long flags; 931 unsigned long flags;
932 932
@@ -973,7 +973,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
973 struct net_device *dev) 973 struct net_device *dev)
974{ 974{
975 long e8390_base = dev->base_addr; 975 long e8390_base = dev->base_addr;
976 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 976 struct ei_device *ei_local = netdev_priv(dev);
977 int length, send_length, output_page; 977 int length, send_length, output_page;
978 unsigned long flags; 978 unsigned long flags;
979 u8 packet[ETH_ZLEN]; 979 u8 packet[ETH_ZLEN];
@@ -1270,7 +1270,7 @@ static void ei_tx_err(struct net_device *dev)
1270static void ei_tx_intr(struct net_device *dev) 1270static void ei_tx_intr(struct net_device *dev)
1271{ 1271{
1272 long e8390_base = dev->base_addr; 1272 long e8390_base = dev->base_addr;
1273 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1273 struct ei_device *ei_local = netdev_priv(dev);
1274 int status = inb(e8390_base + EN0_TSR); 1274 int status = inb(e8390_base + EN0_TSR);
1275 1275
1276 /* 1276 /*
@@ -1354,7 +1354,7 @@ static void ei_tx_intr(struct net_device *dev)
1354static void ei_receive(struct net_device *dev) 1354static void ei_receive(struct net_device *dev)
1355{ 1355{
1356 long e8390_base = dev->base_addr; 1356 long e8390_base = dev->base_addr;
1357 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1357 struct ei_device *ei_local = netdev_priv(dev);
1358 unsigned char rxing_page, this_frame, next_frame; 1358 unsigned char rxing_page, this_frame, next_frame;
1359 unsigned short current_offset; 1359 unsigned short current_offset;
1360 int rx_pkt_count = 0; 1360 int rx_pkt_count = 0;
@@ -1539,7 +1539,7 @@ static void ei_rx_overrun(struct net_device *dev)
1539static struct net_device_stats *get_stats(struct net_device *dev) 1539static struct net_device_stats *get_stats(struct net_device *dev)
1540{ 1540{
1541 long ioaddr = dev->base_addr; 1541 long ioaddr = dev->base_addr;
1542 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1542 struct ei_device *ei_local = netdev_priv(dev);
1543 unsigned long flags; 1543 unsigned long flags;
1544 1544
1545 /* If the card is stopped, just return the present stats. */ 1545 /* If the card is stopped, just return the present stats. */
@@ -1588,7 +1588,7 @@ static void do_set_multicast_list(struct net_device *dev)
1588{ 1588{
1589 long e8390_base = dev->base_addr; 1589 long e8390_base = dev->base_addr;
1590 int i; 1590 int i;
1591 struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); 1591 struct ei_device *ei_local = netdev_priv(dev);
1592 1592
1593 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) { 1593 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
1594 memset(ei_local->mcfilter, 0, 8); 1594 memset(ei_local->mcfilter, 0, 8);
@@ -1646,7 +1646,7 @@ static void AX88190_init(struct net_device *dev, int startp)
1646{ 1646{
1647 axnet_dev_t *info = PRIV(dev); 1647 axnet_dev_t *info = PRIV(dev);
1648 long e8390_base = dev->base_addr; 1648 long e8390_base = dev->base_addr;
1649 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1649 struct ei_device *ei_local = netdev_priv(dev);
1650 int i; 1650 int i;
1651 int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48; 1651 int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
1652 1652
@@ -1712,7 +1712,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1712 int start_page) 1712 int start_page)
1713{ 1713{
1714 long e8390_base = dev->base_addr; 1714 long e8390_base = dev->base_addr;
1715 struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev); 1715 struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
1716 1716
1717 if (inb_p(e8390_base) & E8390_TRANS) 1717 if (inb_p(e8390_base) & E8390_TRANS)
1718 { 1718 {
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7670aac0e93..a8445c72fc1 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -47,11 +47,11 @@ void phy_print_status(struct phy_device *phydev)
47 pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev), 47 pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
48 phydev->link ? "Up" : "Down"); 48 phydev->link ? "Up" : "Down");
49 if (phydev->link) 49 if (phydev->link)
50 printk(" - %d/%s", phydev->speed, 50 printk(KERN_CONT " - %d/%s", phydev->speed,
51 DUPLEX_FULL == phydev->duplex ? 51 DUPLEX_FULL == phydev->duplex ?
52 "Full" : "Half"); 52 "Full" : "Half");
53 53
54 printk("\n"); 54 printk(KERN_CONT "\n");
55} 55}
56EXPORT_SYMBOL(phy_print_status); 56EXPORT_SYMBOL(phy_print_status);
57 57
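The phy_print_status() fix tags the continuation fragments with KERN_CONT; without it, the partial printk() calls can be treated as new messages at the default loglevel instead of being appended to the line just started. The resulting idiom, shown standalone with invented parameters:

#include <linux/kernel.h>

static void example_print_link(const char *name, bool up, int speed, bool full)
{
        printk(KERN_INFO "PHY: %s - Link is %s", name, up ? "Up" : "Down");
        if (up)
                printk(KERN_CONT " - %d/%s", speed, full ? "Full" : "Half");
        printk(KERN_CONT "\n");         /* KERN_CONT: continue the record opened above */
}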
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 39659976a1a..b708f68471a 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1136,8 +1136,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1136 a four-byte PPP header on each packet */ 1136 a four-byte PPP header on each packet */
1137 *skb_push(skb, 2) = 1; 1137 *skb_push(skb, 2) = 1;
1138 if (ppp->pass_filter && 1138 if (ppp->pass_filter &&
1139 sk_run_filter(skb, ppp->pass_filter, 1139 sk_run_filter(skb, ppp->pass_filter) == 0) {
1140 ppp->pass_len) == 0) {
1141 if (ppp->debug & 1) 1140 if (ppp->debug & 1)
1142 printk(KERN_DEBUG "PPP: outbound frame not passed\n"); 1141 printk(KERN_DEBUG "PPP: outbound frame not passed\n");
1143 kfree_skb(skb); 1142 kfree_skb(skb);
@@ -1145,8 +1144,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1145 } 1144 }
1146 /* if this packet passes the active filter, record the time */ 1145 /* if this packet passes the active filter, record the time */
1147 if (!(ppp->active_filter && 1146 if (!(ppp->active_filter &&
1148 sk_run_filter(skb, ppp->active_filter, 1147 sk_run_filter(skb, ppp->active_filter) == 0))
1149 ppp->active_len) == 0))
1150 ppp->last_xmit = jiffies; 1148 ppp->last_xmit = jiffies;
1151 skb_pull(skb, 2); 1149 skb_pull(skb, 2);
1152#else 1150#else
@@ -1758,8 +1756,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1758 1756
1759 *skb_push(skb, 2) = 0; 1757 *skb_push(skb, 2) = 0;
1760 if (ppp->pass_filter && 1758 if (ppp->pass_filter &&
1761 sk_run_filter(skb, ppp->pass_filter, 1759 sk_run_filter(skb, ppp->pass_filter) == 0) {
1762 ppp->pass_len) == 0) {
1763 if (ppp->debug & 1) 1760 if (ppp->debug & 1)
1764 printk(KERN_DEBUG "PPP: inbound frame " 1761 printk(KERN_DEBUG "PPP: inbound frame "
1765 "not passed\n"); 1762 "not passed\n");
@@ -1767,8 +1764,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1767 return; 1764 return;
1768 } 1765 }
1769 if (!(ppp->active_filter && 1766 if (!(ppp->active_filter &&
1770 sk_run_filter(skb, ppp->active_filter, 1767 sk_run_filter(skb, ppp->active_filter) == 0))
1771 ppp->active_len) == 0))
1772 ppp->last_recv = jiffies; 1768 ppp->last_recv = jiffies;
1773 __skb_pull(skb, 2); 1769 __skb_pull(skb, 2);
1774 } else 1770 } else
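Both ppp_send_frame() and ppp_receive_nonmp_frame() are adjusted for the new sk_run_filter() prototype, which no longer takes the program length as a third argument. Purely as an illustration of the call shape, the snippet below runs a made-up one-instruction classic-BPF program that accepts everything; it is not anything PPP actually installs.

#include <linux/filter.h>
#include <linux/skbuff.h>

static bool example_filter_pass(struct sk_buff *skb)
{
        static struct sock_filter accept_all[] = {
                BPF_STMT(BPF_RET | BPF_K, 0xffff),      /* "accept up to 0xffff bytes" */
        };

        /* a non-zero return means the filter accepted the packet */
        return sk_run_filter(skb, accept_all) != 0;
}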
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index ccbc91326bf..7556a9224f7 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -673,8 +673,7 @@ static int __init pptp_init_module(void)
673 int err = 0; 673 int err = 0;
674 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n"); 674 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
675 675
676 callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *), 676 callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
677 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
678 if (!callid_sock) { 677 if (!callid_sock) {
679 pr_err("PPTP: cann't allocate memory\n"); 678 pr_err("PPTP: cann't allocate memory\n");
680 return -ENOMEM; 679 return -ENOMEM;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 7496ed2c34a..1a3584edd79 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2467,7 +2467,7 @@ map_error:
2467static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2467static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2468 struct net_device *ndev) 2468 struct net_device *ndev)
2469{ 2469{
2470 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2470 struct ql3_adapter *qdev = netdev_priv(ndev);
2471 struct ql3xxx_port_registers __iomem *port_regs = 2471 struct ql3xxx_port_registers __iomem *port_regs =
2472 qdev->mem_map_registers; 2472 qdev->mem_map_registers;
2473 struct ql_tx_buf_cb *tx_cb; 2473 struct ql_tx_buf_cb *tx_cb;
@@ -3390,7 +3390,7 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3390 3390
3391static void ql_display_dev_info(struct net_device *ndev) 3391static void ql_display_dev_info(struct net_device *ndev)
3392{ 3392{
3393 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3393 struct ql3_adapter *qdev = netdev_priv(ndev);
3394 struct pci_dev *pdev = qdev->pdev; 3394 struct pci_dev *pdev = qdev->pdev;
3395 3395
3396 netdev_info(ndev, 3396 netdev_info(ndev,
@@ -3573,7 +3573,7 @@ static int ql3xxx_open(struct net_device *ndev)
3573 3573
3574static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3574static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3575{ 3575{
3576 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3576 struct ql3_adapter *qdev = netdev_priv(ndev);
3577 struct ql3xxx_port_registers __iomem *port_regs = 3577 struct ql3xxx_port_registers __iomem *port_regs =
3578 qdev->mem_map_registers; 3578 qdev->mem_map_registers;
3579 struct sockaddr *addr = p; 3579 struct sockaddr *addr = p;
@@ -3608,7 +3608,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3608 3608
3609static void ql3xxx_tx_timeout(struct net_device *ndev) 3609static void ql3xxx_tx_timeout(struct net_device *ndev)
3610{ 3610{
3611 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3611 struct ql3_adapter *qdev = netdev_priv(ndev);
3612 3612
3613 netdev_err(ndev, "Resetting...\n"); 3613 netdev_err(ndev, "Resetting...\n");
3614 /* 3614 /*
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 8ecc170c9b7..f267da42f24 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#ifndef _QLCNIC_H_ 8#ifndef _QLCNIC_H_
@@ -51,8 +34,8 @@
51 34
52#define _QLCNIC_LINUX_MAJOR 5 35#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0 36#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 11 37#define _QLCNIC_LINUX_SUBVERSION 13
55#define QLCNIC_LINUX_VERSIONID "5.0.11" 38#define QLCNIC_LINUX_VERSIONID "5.0.13"
56#define QLCNIC_DRV_IDC_VER 0x01 39#define QLCNIC_DRV_IDC_VER 0x01
57#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 40#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
58 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 41 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -798,7 +781,6 @@ struct qlcnic_nic_intr_coalesce {
798#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16 781#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
799#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17 782#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
800#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18 783#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
801#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
802#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20 784#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
803#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21 785#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
804#define QLCNIC_C2C_OPCODE 22 786#define QLCNIC_C2C_OPCODE 22
@@ -923,6 +905,7 @@ struct qlcnic_ipaddr {
923#define QLCNIC_MACSPOOF 0x200 905#define QLCNIC_MACSPOOF 0x200
924#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400 906#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
925#define QLCNIC_PROMISC_DISABLED 0x800 907#define QLCNIC_PROMISC_DISABLED 0x800
908#define QLCNIC_NEED_FLR 0x1000
926#define QLCNIC_IS_MSI_FAMILY(adapter) \ 909#define QLCNIC_IS_MSI_FAMILY(adapter) \
927 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 910 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
928 911
@@ -1126,8 +1109,7 @@ struct qlcnic_eswitch {
1126/* Return codes for Error handling */ 1109/* Return codes for Error handling */
1127#define QL_STATUS_INVALID_PARAM -1 1110#define QL_STATUS_INVALID_PARAM -1
1128 1111
1129#define MAX_BW 100 1112#define MAX_BW 100 /* % of link speed */
1130#define MIN_BW 1
1131#define MAX_VLAN_ID 4095 1113#define MAX_VLAN_ID 4095
1132#define MIN_VLAN_ID 2 1114#define MIN_VLAN_ID 2
1133#define MAX_TX_QUEUES 1 1115#define MAX_TX_QUEUES 1
@@ -1135,7 +1117,7 @@ struct qlcnic_eswitch {
1135#define DEFAULT_MAC_LEARN 1 1117#define DEFAULT_MAC_LEARN 1
1136 1118
1137#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) 1119#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
1138#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW) 1120#define IS_VALID_BW(bw) (bw <= MAX_BW)
1139#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES) 1121#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
1140#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES) 1122#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
1141 1123
@@ -1314,21 +1296,15 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
1314int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); 1296int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1315void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, 1297void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1316 struct qlcnic_host_tx_ring *tx_ring); 1298 struct qlcnic_host_tx_ring *tx_ring);
1317void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
1318int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
1319void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); 1299void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
1320 1300
1321/* Functions from qlcnic_main.c */ 1301/* Functions from qlcnic_main.c */
1322int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter);
1323void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter);
1324int qlcnic_reset_context(struct qlcnic_adapter *); 1302int qlcnic_reset_context(struct qlcnic_adapter *);
1325u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter, 1303u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
1326 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd); 1304 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
1327void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); 1305void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
1328int qlcnic_diag_alloc_res(struct net_device *netdev, int test); 1306int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
1329int qlcnic_check_loopback_buff(unsigned char *data);
1330netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 1307netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1331void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
1332 1308
1333/* Management functions */ 1309/* Management functions */
1334int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); 1310int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
@@ -1377,6 +1353,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
1377 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"}, 1353 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1378 {0x1077, 0x8020, 0x103c, 0x3733, 1354 {0x1077, 0x8020, 0x103c, 0x3733,
1379 "NC523SFP 10Gb 2-port Server Adapter"}, 1355 "NC523SFP 10Gb 2-port Server Adapter"},
1356 {0x1077, 0x8020, 0x103c, 0x3346,
1357 "CN1000Q Dual Port Converged Network Adapter"},
1380 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"}, 1358 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1381}; 1359};
1382 1360
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 1cdc05dade6..27631f23b3f 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include "qlcnic.h" 8#include "qlcnic.h"
@@ -480,6 +463,11 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
480{ 463{
481 int err; 464 int err;
482 465
466 if (adapter->flags & QLCNIC_NEED_FLR) {
467 pci_reset_function(adapter->pdev);
468 adapter->flags &= ~QLCNIC_NEED_FLR;
469 }
470
483 err = qlcnic_fw_cmd_create_rx_ctx(adapter); 471 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
484 if (err) 472 if (err)
485 return err; 473 return err;
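The qlcnic change apparently records at probe time (see the qlcnic_main.c hunk further down) that the function was still marked active, for example after a previous driver instance, and then performs the function-level reset lazily, right before the new firmware context is created. A reduced sketch of that deferred-FLR pattern, with invented names and no error handling:

#include <linux/pci.h>

static int example_create_fw_ctx(struct pci_dev *pdev, bool *need_flr)
{
        if (*need_flr) {
                pci_reset_function(pdev);       /* resets just this PCI function (FLR when available) */
                *need_flr = false;
        }

        /* ... rx/tx context creation against the now-clean function ... */
        return 0;
}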
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index ec21d24015c..0eaf31bf8a0 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include <linux/types.h> 8#include <linux/types.h>
@@ -101,8 +84,7 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
101static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { 84static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
102 "Register_Test_on_offline", 85 "Register_Test_on_offline",
103 "Link_Test_on_offline", 86 "Link_Test_on_offline",
104 "Interrupt_Test_offline", 87 "Interrupt_Test_offline"
105 "Loopback_Test_offline"
106}; 88};
107 89
108#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) 90#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
@@ -643,104 +625,6 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
643 } 625 }
644} 626}
645 627
646#define QLC_ILB_PKT_SIZE 64
647#define QLC_NUM_ILB_PKT 16
648#define QLC_ILB_MAX_RCV_LOOP 10
649
650static void qlcnic_create_loopback_buff(unsigned char *data)
651{
652 unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
653 memset(data, 0x4e, QLC_ILB_PKT_SIZE);
654 memset(data, 0xff, 12);
655 memcpy(data + 12, random_data, sizeof(random_data));
656}
657
658int qlcnic_check_loopback_buff(unsigned char *data)
659{
660 unsigned char buff[QLC_ILB_PKT_SIZE];
661 qlcnic_create_loopback_buff(buff);
662 return memcmp(data, buff, QLC_ILB_PKT_SIZE);
663}
664
665static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
666{
667 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
668 struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
669 struct sk_buff *skb;
670 int i, loop, cnt = 0;
671
672 for (i = 0; i < QLC_NUM_ILB_PKT; i++) {
673 skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
674 qlcnic_create_loopback_buff(skb->data);
675 skb_put(skb, QLC_ILB_PKT_SIZE);
676
677 adapter->diag_cnt = 0;
678 qlcnic_xmit_frame(skb, adapter->netdev);
679
680 loop = 0;
681 do {
682 msleep(1);
683 qlcnic_process_rcv_ring_diag(sds_ring);
684 } while (loop++ < QLC_ILB_MAX_RCV_LOOP &&
685 !adapter->diag_cnt);
686
687 dev_kfree_skb_any(skb);
688
689 if (!adapter->diag_cnt)
690 dev_warn(&adapter->pdev->dev, "ILB Test: %dth packet"
691 " not recevied\n", i + 1);
692 else
693 cnt++;
694 }
695 if (cnt != i) {
696 dev_warn(&adapter->pdev->dev, "ILB Test failed\n");
697 return -1;
698 }
699 return 0;
700}
701
702static int qlcnic_loopback_test(struct net_device *netdev)
703{
704 struct qlcnic_adapter *adapter = netdev_priv(netdev);
705 int max_sds_rings = adapter->max_sds_rings;
706 int ret;
707
708 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
709 dev_warn(&adapter->pdev->dev, "Loopback test not supported"
710 "for non privilege function\n");
711 return 0;
712 }
713
714 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
715 return -EIO;
716
717 if (qlcnic_request_quiscent_mode(adapter)) {
718 clear_bit(__QLCNIC_RESETTING, &adapter->state);
719 return -EIO;
720 }
721
722 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
723 if (ret)
724 goto clear_it;
725
726 ret = qlcnic_set_ilb_mode(adapter);
727 if (ret)
728 goto done;
729
730 ret = qlcnic_do_ilb_test(adapter);
731
732 qlcnic_clear_ilb_mode(adapter);
733
734done:
735 qlcnic_diag_free_res(netdev, max_sds_rings);
736
737clear_it:
738 qlcnic_clear_quiscent_mode(adapter);
739 adapter->max_sds_rings = max_sds_rings;
740 clear_bit(__QLCNIC_RESETTING, &adapter->state);
741 return ret;
742}
743
744static int qlcnic_irq_test(struct net_device *netdev) 628static int qlcnic_irq_test(struct net_device *netdev)
745{ 629{
746 struct qlcnic_adapter *adapter = netdev_priv(netdev); 630 struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -793,9 +677,6 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
793 if (data[2]) 677 if (data[2])
794 eth_test->flags |= ETH_TEST_FL_FAILED; 678 eth_test->flags |= ETH_TEST_FL_FAILED;
795 679
796 data[3] = qlcnic_loopback_test(dev);
797 if (data[3])
798 eth_test->flags |= ETH_TEST_FL_FAILED;
799 680
800 } 681 }
801} 682}
@@ -925,9 +806,10 @@ static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
925 806
926 dev->features &= ~NETIF_F_LRO; 807 dev->features &= ~NETIF_F_LRO;
927 qlcnic_send_lro_cleanup(adapter); 808 qlcnic_send_lro_cleanup(adapter);
809 dev_info(&adapter->pdev->dev,
810 "disabling LRO as rx_csum is off\n");
928 } 811 }
929 adapter->rx_csum = !!data; 812 adapter->rx_csum = !!data;
930 dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
931 return 0; 813 return 0;
932} 814}
933 815
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 4290b80cde1..19328e05b75 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#ifndef __QLCNIC_HDR_H_ 8#ifndef __QLCNIC_HDR_H_
@@ -722,7 +705,7 @@ enum {
722#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ 705#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
723#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ 706#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
724 707
725#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4))) 708#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
726#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) 709#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
727#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) 710#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
728#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) 711#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
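The QLC_DEV_CHECK_ACTIVE change is a real bug fix, not cosmetics: the old macro used a compound assignment, so merely asking "is this function active?" also overwrote the caller's copy of the register value, destroying the bits for every other function. A standalone user-space demonstration with illustrative macro and variable names:

#include <stdio.h>

#define CHECK_ACTIVE_OLD(VAL, FN)  ((VAL) &= (1 << ((FN) * 4)))        /* clobbers VAL */
#define CHECK_ACTIVE_NEW(VAL, FN)  ((VAL) &  (1 << ((FN) * 4)))        /* side-effect free */

int main(void)
{
        unsigned int val = 0x00000011;  /* functions 0 and 1 flagged active */

        CHECK_ACTIVE_OLD(val, 1);
        printf("old macro left val = 0x%08x (function 0's bit is gone)\n", val);

        val = 0x00000011;
        CHECK_ACTIVE_NEW(val, 1);
        printf("new macro left val = 0x%08x (unchanged)\n", val);
        return 0;
}

With the fix, the probe-time check added in qlcnic_main.c can test the value read from QLCNIC_CRB_DRV_ACTIVE without corrupting it.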
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 7a47a2a7ee2..c9c4bf1458a 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include "qlcnic.h" 8#include "qlcnic.h"
@@ -1234,56 +1217,3 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1234 1217
1235 return rv; 1218 return rv;
1236} 1219}
1237
1238static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
1239{
1240 struct qlcnic_nic_req req;
1241 int rv;
1242 u64 word;
1243
1244 memset(&req, 0, sizeof(struct qlcnic_nic_req));
1245 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1246
1247 word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
1248 ((u64)adapter->portnum << 16);
1249 req.req_hdr = cpu_to_le64(word);
1250 req.words[0] = cpu_to_le64(flag);
1251
1252 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1253 if (rv)
1254 dev_err(&adapter->pdev->dev,
1255 "%sting loopback mode failed.\n",
1256 flag ? "Set" : "Reset");
1257 return rv;
1258}
1259
1260int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
1261{
1262 if (qlcnic_set_fw_loopback(adapter, 1))
1263 return -EIO;
1264
1265 if (qlcnic_nic_set_promisc(adapter,
1266 VPORT_MISS_MODE_ACCEPT_ALL)) {
1267 qlcnic_set_fw_loopback(adapter, 0);
1268 return -EIO;
1269 }
1270
1271 msleep(1000);
1272 return 0;
1273}
1274
1275void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
1276{
1277 int mode = VPORT_MISS_MODE_DROP;
1278 struct net_device *netdev = adapter->netdev;
1279
1280 qlcnic_set_fw_loopback(adapter, 0);
1281
1282 if (netdev->flags & IFF_PROMISC)
1283 mode = VPORT_MISS_MODE_ACCEPT_ALL;
1284 else if (netdev->flags & IFF_ALLMULTI)
1285 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1286
1287 qlcnic_nic_set_promisc(adapter, mode);
1288 msleep(1000);
1289}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 0d180c6e41f..9b9c7c39d3e 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include <linux/netdevice.h> 8#include <linux/netdevice.h>
@@ -236,12 +219,11 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
236 tx_ring->num_desc = adapter->num_txd; 219 tx_ring->num_desc = adapter->num_txd;
237 tx_ring->txq = netdev_get_tx_queue(netdev, 0); 220 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
238 221
239 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); 222 cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
240 if (cmd_buf_arr == NULL) { 223 if (cmd_buf_arr == NULL) {
241 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); 224 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
242 goto err_out; 225 goto err_out;
243 } 226 }
244 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
245 tx_ring->cmd_buf_arr = cmd_buf_arr; 227 tx_ring->cmd_buf_arr = cmd_buf_arr;
246 228
247 recv_ctx = &adapter->recv_ctx; 229 recv_ctx = &adapter->recv_ctx;
@@ -275,14 +257,12 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
275 rds_ring->dma_size + NET_IP_ALIGN; 257 rds_ring->dma_size + NET_IP_ALIGN;
276 break; 258 break;
277 } 259 }
278 rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *) 260 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
279 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
280 if (rds_ring->rx_buf_arr == NULL) { 261 if (rds_ring->rx_buf_arr == NULL) {
281 dev_err(&netdev->dev, "Failed to allocate " 262 dev_err(&netdev->dev, "Failed to allocate "
282 "rx buffer ring %d\n", ring); 263 "rx buffer ring %d\n", ring);
283 goto err_out; 264 goto err_out;
284 } 265 }
285 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
286 INIT_LIST_HEAD(&rds_ring->free_list); 266 INIT_LIST_HEAD(&rds_ring->free_list);
287 /* 267 /*
288 * Now go through all of them, set reference handles 268 * Now go through all of them, set reference handles
@@ -1693,99 +1673,6 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1693 spin_unlock(&rds_ring->lock); 1673 spin_unlock(&rds_ring->lock);
1694} 1674}
1695 1675
1696static void dump_skb(struct sk_buff *skb)
1697{
1698 int i;
1699 unsigned char *data = skb->data;
1700
1701 for (i = 0; i < skb->len; i++) {
1702 printk("%02x ", data[i]);
1703 if ((i & 0x0f) == 8)
1704 printk("\n");
1705 }
1706}
1707
1708static struct qlcnic_rx_buffer *
1709qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1710 struct qlcnic_host_sds_ring *sds_ring,
1711 int ring, u64 sts_data0)
1712{
1713 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1714 struct qlcnic_rx_buffer *buffer;
1715 struct sk_buff *skb;
1716 struct qlcnic_host_rds_ring *rds_ring;
1717 int index, length, cksum, pkt_offset;
1718
1719 if (unlikely(ring >= adapter->max_rds_rings))
1720 return NULL;
1721
1722 rds_ring = &recv_ctx->rds_rings[ring];
1723
1724 index = qlcnic_get_sts_refhandle(sts_data0);
1725 if (unlikely(index >= rds_ring->num_desc))
1726 return NULL;
1727
1728 buffer = &rds_ring->rx_buf_arr[index];
1729
1730 length = qlcnic_get_sts_totallength(sts_data0);
1731 cksum = qlcnic_get_sts_status(sts_data0);
1732 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1733
1734 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1735 if (!skb)
1736 return buffer;
1737
1738 if (length > rds_ring->skb_size)
1739 skb_put(skb, rds_ring->skb_size);
1740 else
1741 skb_put(skb, length);
1742
1743 if (pkt_offset)
1744 skb_pull(skb, pkt_offset);
1745
1746 if (!qlcnic_check_loopback_buff(skb->data))
1747 adapter->diag_cnt++;
1748 else
1749 dump_skb(skb);
1750
1751 dev_kfree_skb_any(skb);
1752 adapter->stats.rx_pkts++;
1753 adapter->stats.rxbytes += length;
1754
1755 return buffer;
1756}
1757
1758void
1759qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1760{
1761 struct qlcnic_adapter *adapter = sds_ring->adapter;
1762 struct status_desc *desc;
1763 struct qlcnic_rx_buffer *rxbuf;
1764 u64 sts_data0;
1765
1766 int opcode, ring, desc_cnt;
1767 u32 consumer = sds_ring->consumer;
1768
1769 desc = &sds_ring->desc_head[consumer];
1770 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1771
1772 if (!(sts_data0 & STATUS_OWNER_HOST))
1773 return;
1774
1775 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1776 opcode = qlcnic_get_sts_opcode(sts_data0);
1777
1778 ring = qlcnic_get_sts_type(sts_data0);
1779 rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
1780 ring, sts_data0);
1781
1782 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1783 consumer = get_next_index(consumer, sds_ring->num_desc);
1784
1785 sds_ring->consumer = consumer;
1786 writel(consumer, sds_ring->crb_sts_consumer);
1787}
1788
1789void 1676void
1790qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2, 1677qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
1791 u8 alt_mac, u8 *mac) 1678 u8 alt_mac, u8 *mac)
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index a3dcd04be22..788850e2ba4 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include <linux/slab.h> 8#include <linux/slab.h>
@@ -1485,6 +1468,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1485 uint8_t revision_id; 1468 uint8_t revision_id;
1486 uint8_t pci_using_dac; 1469 uint8_t pci_using_dac;
1487 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN]; 1470 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
1471 u32 val;
1488 1472
1489 err = pci_enable_device(pdev); 1473 err = pci_enable_device(pdev);
1490 if (err) 1474 if (err)
@@ -1546,6 +1530,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1546 if (err) 1530 if (err)
1547 goto err_out_iounmap; 1531 goto err_out_iounmap;
1548 1532
1533 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
1534 if (QLC_DEV_CHECK_ACTIVE(val, adapter->portnum))
1535 adapter->flags |= QLCNIC_NEED_FLR;
1536
1549 err = adapter->nic_ops->start_firmware(adapter); 1537 err = adapter->nic_ops->start_firmware(adapter);
1550 if (err) { 1538 if (err) {
1551 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); 1539 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
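The probe path gains a check of the firmware's DRV_ACTIVE register: if this function's active bit is already set, the adapter is flagged QLCNIC_NEED_FLR before start_firmware() runs, presumably so that a function left active by a previous driver instance gets a function-level reset first. The sketch below only shows the shape of such a check; the four-bits-per-function layout assumed for QLC_DEV_CHECK_ACTIVE() is a guess for illustration, not taken from the driver headers.

    /* Hypothetical DRV_ACTIVE layout: one nibble per PCI function, bit 0
     * of the nibble meaning "driver active". */
    #include <stdint.h>
    #include <stdbool.h>

    #define NEED_FLR (1u << 0)

    static bool drv_check_active(uint32_t drv_active, int portnum)
    {
        return (drv_active & (1u << (portnum * 4))) != 0;  /* assumed layout */
    }

    static uint32_t probe_flags(uint32_t drv_active, int portnum)
    {
        uint32_t flags = 0;

        if (drv_check_active(drv_active, portnum))
            flags |= NEED_FLR;
        return flags;
    }

    int main(void)
    {
        /* function 1 already active in a made-up DRV_ACTIVE value */
        return (probe_flags(0x10, 1) & NEED_FLR) ? 0 : 1;
    }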
@@ -2854,61 +2842,6 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2854 qlcnic_api_unlock(adapter); 2842 qlcnic_api_unlock(adapter);
2855} 2843}
2856 2844
2857/* Caller should held RESETTING bit.
2858 * This should be call in sync with qlcnic_request_quiscent_mode.
2859 */
2860void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter)
2861{
2862 qlcnic_clr_drv_state(adapter);
2863 qlcnic_api_lock(adapter);
2864 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
2865 qlcnic_api_unlock(adapter);
2866}
2867
2868/* Caller should held RESETTING bit.
2869 */
2870int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter)
2871{
2872 u8 timeo = adapter->dev_init_timeo / 2;
2873 u32 state;
2874
2875 if (qlcnic_api_lock(adapter))
2876 return -EIO;
2877
2878 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2879 if (state != QLCNIC_DEV_READY)
2880 return -EIO;
2881
2882 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_QUISCENT);
2883 qlcnic_api_unlock(adapter);
2884 QLCDB(adapter, DRV, "NEED QUISCENT state set\n");
2885 qlcnic_idc_debug_info(adapter, 0);
2886
2887 qlcnic_set_drv_state(adapter, QLCNIC_DEV_NEED_QUISCENT);
2888
2889 do {
2890 msleep(2000);
2891 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2892 if (state == QLCNIC_DEV_QUISCENT)
2893 return 0;
2894 if (!qlcnic_check_drv_state(adapter)) {
2895 if (qlcnic_api_lock(adapter))
2896 return -EIO;
2897 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2898 QLCNIC_DEV_QUISCENT);
2899 qlcnic_api_unlock(adapter);
2900 QLCDB(adapter, DRV, "QUISCENT mode set\n");
2901 return 0;
2902 }
2903 } while (--timeo);
2904
2905 dev_err(&adapter->pdev->dev, "Failed to quiesce device, DRV_STATE=%08x"
2906 " DRV_ACTIVE=%08x\n", QLCRD32(adapter, QLCNIC_CRB_DRV_STATE),
2907 QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE));
2908 qlcnic_clear_quiscent_mode(adapter);
2909 return -EIO;
2910}
2911
2912/*Transit to RESET state from READY state only */ 2845/*Transit to RESET state from READY state only */
2913static void 2846static void
2914qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) 2847qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
@@ -3587,9 +3520,12 @@ validate_esw_config(struct qlcnic_adapter *adapter,
3587 case QLCNIC_PORT_DEFAULTS: 3520 case QLCNIC_PORT_DEFAULTS:
3588 if (QLC_DEV_GET_DRV(op_mode, pci_func) != 3521 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
3589 QLCNIC_NON_PRIV_FUNC) { 3522 QLCNIC_NON_PRIV_FUNC) {
3590 esw_cfg[i].mac_anti_spoof = 0; 3523 if (esw_cfg[i].mac_anti_spoof != 0)
3591 esw_cfg[i].mac_override = 1; 3524 return QL_STATUS_INVALID_PARAM;
3592 esw_cfg[i].promisc_mode = 1; 3525 if (esw_cfg[i].mac_override != 1)
3526 return QL_STATUS_INVALID_PARAM;
3527 if (esw_cfg[i].promisc_mode != 1)
3528 return QL_STATUS_INVALID_PARAM;
3593 } 3529 }
3594 break; 3530 break;
3595 case QLCNIC_ADD_VLAN: 3531 case QLCNIC_ADD_VLAN:
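validate_esw_config() changes behaviour for privileged functions: instead of silently forcing mac_anti_spoof = 0, mac_override = 1 and promisc_mode = 1, it now rejects any other combination with QL_STATUS_INVALID_PARAM, so the caller's settings are validated rather than quietly overwritten. A minimal sketch of that validate-instead-of-coerce pattern, using an illustrative struct rather than the driver's qlcnic types:

    #include <errno.h>

    struct esw_cfg {
        unsigned char mac_anti_spoof;
        unsigned char mac_override;
        unsigned char promisc_mode;
    };

    /* Accept only the single combination supported for a privileged
     * function; never rewrite the caller's request. */
    static int validate_privileged_cfg(const struct esw_cfg *cfg)
    {
        if (cfg->mac_anti_spoof != 0)
            return -EINVAL;
        if (cfg->mac_override != 1)
            return -EINVAL;
        if (cfg->promisc_mode != 1)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        struct esw_cfg cfg = { .mac_anti_spoof = 0, .mac_override = 1,
                               .promisc_mode = 1 };

        return validate_privileged_cfg(&cfg);  /* 0: accepted */
    }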
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 22821398fc6..bdb8fe86853 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
16 */ 16 */
17#define DRV_NAME "qlge" 17#define DRV_NAME "qlge"
18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
19#define DRV_VERSION "v1.00.00.25.00.00-01" 19#define DRV_VERSION "v1.00.00.27.00.00-01"
20 20
21#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 21#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
22 22
@@ -2221,6 +2221,7 @@ int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
2221int ql_unpause_mpi_risc(struct ql_adapter *qdev); 2221int ql_unpause_mpi_risc(struct ql_adapter *qdev);
2222int ql_pause_mpi_risc(struct ql_adapter *qdev); 2222int ql_pause_mpi_risc(struct ql_adapter *qdev);
2223int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); 2223int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
2224int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
2224int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, 2225int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
2225 u32 ram_addr, int word_count); 2226 u32 ram_addr, int word_count);
2226int ql_core_dump(struct ql_adapter *qdev, 2227int ql_core_dump(struct ql_adapter *qdev,
@@ -2236,6 +2237,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
2236int ql_mb_get_port_cfg(struct ql_adapter *qdev); 2237int ql_mb_get_port_cfg(struct ql_adapter *qdev);
2237int ql_mb_set_port_cfg(struct ql_adapter *qdev); 2238int ql_mb_set_port_cfg(struct ql_adapter *qdev);
2238int ql_wait_fifo_empty(struct ql_adapter *qdev); 2239int ql_wait_fifo_empty(struct ql_adapter *qdev);
2240void ql_get_dump(struct ql_adapter *qdev, void *buff);
2239void ql_gen_reg_dump(struct ql_adapter *qdev, 2241void ql_gen_reg_dump(struct ql_adapter *qdev,
2240 struct ql_reg_dump *mpi_coredump); 2242 struct ql_reg_dump *mpi_coredump);
2241netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); 2243netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 4747492935e..fca804f36d6 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1317,9 +1317,28 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
1317 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1317 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1318 if (status) 1318 if (status)
1319 return; 1319 return;
1320}
1321
1322void ql_get_dump(struct ql_adapter *qdev, void *buff)
1323{
1324 /*
1325 * If the dump has already been taken and is stored
1326 * in our internal buffer and if force dump is set then
1327 * just start the spool to dump it to the log file
1328 * and also, take a snapshot of the general regs to
1329 * to the user's buffer or else take complete dump
1330 * to the user's buffer if force is not set.
1331 */
1320 1332
1321 if (test_bit(QL_FRC_COREDUMP, &qdev->flags)) 1333 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
1334 if (!ql_core_dump(qdev, buff))
1335 ql_soft_reset_mpi_risc(qdev);
1336 else
1337 netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
1338 } else {
1339 ql_gen_reg_dump(qdev, buff);
1322 ql_get_core_dump(qdev); 1340 ql_get_core_dump(qdev);
1341 }
1323} 1342}
1324 1343
1325/* Coredump to messages log file using separate worker thread */ 1344/* Coredump to messages log file using separate worker thread */
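The new ql_get_dump() splits the two dump modes: without QL_FRC_COREDUMP the full MPI core dump is written into the caller's buffer and, on success, the MPI RISC is soft-reset (ql_soft_reset_mpi_risc() is made non-static elsewhere in this patch for that purpose); with the flag set, only the general register snapshot goes to the caller and the core dump is spooled to the log via ql_get_core_dump(). A user-space sketch of that dispatch, with the driver calls stubbed out:

    #include <stdbool.h>
    #include <stdio.h>

    static int core_dump(void *buf)          { (void)buf; return 0; }      /* stub */
    static void soft_reset_mpi(void)         { puts("soft reset MPI"); }   /* stub */
    static void reg_dump(void *buf)          { (void)buf; }                /* stub */
    static void spool_core_dump_to_log(void) { puts("spool to log"); }     /* stub */

    static void get_dump(bool force_coredump, void *buf)
    {
        if (!force_coredump) {
            if (!core_dump(buf))
                soft_reset_mpi();
            else
                fprintf(stderr, "coredump failed!\n");
        } else {
            reg_dump(buf);
            spool_core_dump_to_log();
        }
    }

    int main(void)
    {
        char buf[64];

        get_dump(false, buf);
        get_dump(true, buf);
        return 0;
    }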
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 4892d64f4e0..8149cc9de4c 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -375,7 +375,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
375 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); 375 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
376 drvinfo->n_stats = 0; 376 drvinfo->n_stats = 0;
377 drvinfo->testinfo_len = 0; 377 drvinfo->testinfo_len = 0;
378 drvinfo->regdump_len = 0; 378 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
379 drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
380 else
381 drvinfo->regdump_len = sizeof(struct ql_reg_dump);
379 drvinfo->eedump_len = 0; 382 drvinfo->eedump_len = 0;
380} 383}
381 384
@@ -547,7 +550,12 @@ static void ql_self_test(struct net_device *ndev,
547 550
548static int ql_get_regs_len(struct net_device *ndev) 551static int ql_get_regs_len(struct net_device *ndev)
549{ 552{
550 return sizeof(struct ql_reg_dump); 553 struct ql_adapter *qdev = netdev_priv(ndev);
554
555 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
556 return sizeof(struct ql_mpi_coredump);
557 else
558 return sizeof(struct ql_reg_dump);
551} 559}
552 560
553static void ql_get_regs(struct net_device *ndev, 561static void ql_get_regs(struct net_device *ndev,
@@ -555,7 +563,12 @@ static void ql_get_regs(struct net_device *ndev,
555{ 563{
556 struct ql_adapter *qdev = netdev_priv(ndev); 564 struct ql_adapter *qdev = netdev_priv(ndev);
557 565
558 ql_gen_reg_dump(qdev, p); 566 ql_get_dump(qdev, p);
567 qdev->core_is_dumped = 0;
568 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
569 regs->len = sizeof(struct ql_mpi_coredump);
570 else
571 regs->len = sizeof(struct ql_reg_dump);
559} 572}
560 573
561static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 574static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
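On the ethtool side, the regdump_len reported by get_drvinfo(), ql_get_regs_len() and the regs->len set by ql_get_regs() now have to agree, and all three depend on QL_FRC_COREDUMP: the full MPI core dump size when the flag is clear, the plain register dump size otherwise. Sketch of that sizing rule; the byte counts are placeholders, not the real structure sizes:

    #include <stdbool.h>
    #include <stddef.h>

    #define MPI_COREDUMP_LEN (800 * 1024)  /* placeholder size */
    #define REG_DUMP_LEN     (4 * 1024)    /* placeholder size */

    /* Without the force-coredump flag, ethtool -d returns the full MPI
     * core dump; with it, only the register snapshot. */
    static size_t regs_len(bool force_coredump)
    {
        return force_coredump ? REG_DUMP_LEN : MPI_COREDUMP_LEN;
    }

    int main(void)
    {
        return regs_len(true) < regs_len(false) ? 0 : 1;
    }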
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 528eaef5308..e4dbbbfec72 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3844,7 +3844,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3844 3844
3845static void ql_display_dev_info(struct net_device *ndev) 3845static void ql_display_dev_info(struct net_device *ndev)
3846{ 3846{
3847 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 3847 struct ql_adapter *qdev = netdev_priv(ndev);
3848 3848
3849 netif_info(qdev, probe, qdev->ndev, 3849 netif_info(qdev, probe, qdev->ndev,
3850 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " 3850 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
@@ -4264,7 +4264,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
4264 4264
4265static void qlge_set_multicast_list(struct net_device *ndev) 4265static void qlge_set_multicast_list(struct net_device *ndev)
4266{ 4266{
4267 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4267 struct ql_adapter *qdev = netdev_priv(ndev);
4268 struct netdev_hw_addr *ha; 4268 struct netdev_hw_addr *ha;
4269 int i, status; 4269 int i, status;
4270 4270
@@ -4354,7 +4354,7 @@ exit:
4354 4354
4355static int qlge_set_mac_address(struct net_device *ndev, void *p) 4355static int qlge_set_mac_address(struct net_device *ndev, void *p)
4356{ 4356{
4357 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4357 struct ql_adapter *qdev = netdev_priv(ndev);
4358 struct sockaddr *addr = p; 4358 struct sockaddr *addr = p;
4359 int status; 4359 int status;
4360 4360
@@ -4377,7 +4377,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
4377 4377
4378static void qlge_tx_timeout(struct net_device *ndev) 4378static void qlge_tx_timeout(struct net_device *ndev)
4379{ 4379{
4380 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4380 struct ql_adapter *qdev = netdev_priv(ndev);
4381 ql_queue_asic_error(qdev); 4381 ql_queue_asic_error(qdev);
4382} 4382}
4383 4383
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 0e7c7c7ee16..100a462cc91 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
87 return status; 87 return status;
88} 88}
89 89
90static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev) 90int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
91{ 91{
92 int status; 92 int status;
93 status = ql_write_mpi_reg(qdev, 0x00001010, 1); 93 status = ql_write_mpi_reg(qdev, 0x00001010, 1);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ecc25aab896..0f4219cb0be 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -8321,8 +8321,7 @@ mem_alloc_failed:
8321 8321
8322static void __devexit s2io_rem_nic(struct pci_dev *pdev) 8322static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8323{ 8323{
8324 struct net_device *dev = 8324 struct net_device *dev = pci_get_drvdata(pdev);
8325 (struct net_device *)pci_get_drvdata(pdev);
8326 struct s2io_nic *sp; 8325 struct s2io_nic *sp;
8327 8326
8328 if (dev == NULL) { 8327 if (dev == NULL) {
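The qlge_main.c and s2io.c hunks above are pure cast removals: netdev_priv() and pci_get_drvdata() both return void *, which converts implicitly to any object pointer type in C, so the explicit casts added nothing. A tiny illustration with a stand-in accessor:

    #include <stdio.h>

    struct priv { int id; };

    static void *get_priv(void *dev)  /* stands in for netdev_priv() */
    {
        return dev;
    }

    int main(void)
    {
        struct priv p = { .id = 7 };
        struct priv *q = get_priv(&p);  /* no cast needed for void * */

        printf("%d\n", q->id);
        return 0;
    }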
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 417adf37282..76290a8c3c1 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1449,7 +1449,8 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1449 dev->irq = pdev->irq; 1449 dev->irq = pdev->irq;
1450 1450
1451 /* faked with skb_copy_and_csum_dev */ 1451 /* faked with skb_copy_and_csum_dev */
1452 dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; 1452 dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
1453 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1453 1454
1454 dev->netdev_ops = &sc92031_netdev_ops; 1455 dev->netdev_ops = &sc92031_netdev_ops;
1455 dev->watchdog_timeo = TX_TIMEOUT; 1456 dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index fb83cdd9464..2166c1d0a53 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -23,7 +23,6 @@
23#include <linux/gfp.h> 23#include <linux/gfp.h>
24#include "net_driver.h" 24#include "net_driver.h"
25#include "efx.h" 25#include "efx.h"
26#include "mdio_10g.h"
27#include "nic.h" 26#include "nic.h"
28 27
29#include "mcdi.h" 28#include "mcdi.h"
@@ -921,6 +920,7 @@ static void efx_mac_work(struct work_struct *data)
921 920
922static int efx_probe_port(struct efx_nic *efx) 921static int efx_probe_port(struct efx_nic *efx)
923{ 922{
923 unsigned char *perm_addr;
924 int rc; 924 int rc;
925 925
926 netif_dbg(efx, probe, efx->net_dev, "create port\n"); 926 netif_dbg(efx, probe, efx->net_dev, "create port\n");
@@ -934,11 +934,12 @@ static int efx_probe_port(struct efx_nic *efx)
934 return rc; 934 return rc;
935 935
936 /* Sanity check MAC address */ 936 /* Sanity check MAC address */
937 if (is_valid_ether_addr(efx->mac_address)) { 937 perm_addr = efx->net_dev->perm_addr;
938 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); 938 if (is_valid_ether_addr(perm_addr)) {
939 memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
939 } else { 940 } else {
940 netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n", 941 netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
941 efx->mac_address); 942 perm_addr);
942 if (!allow_bad_hwaddr) { 943 if (!allow_bad_hwaddr) {
943 rc = -EINVAL; 944 rc = -EINVAL;
944 goto err; 945 goto err;
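efx_probe_port() now sanity-checks and adopts the permanent address from net_dev->perm_addr (which the Falcon NVRAM probe fills in later in this patch) instead of a separate efx->mac_address field. The helper below mirrors what is_valid_ether_addr() accepts, a unicast address that is neither multicast nor all zeros; it is an illustration, not the kernel implementation.

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    static bool valid_ether_addr(const uint8_t *a)
    {
        static const uint8_t zero[ETH_ALEN];

        if (a[0] & 0x01)                 /* multicast/broadcast bit */
            return false;
        if (!memcmp(a, zero, ETH_ALEN))  /* all zeros */
            return false;
        return true;
    }

    int main(void)
    {
        uint8_t perm[ETH_ALEN] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };
        uint8_t dev_addr[ETH_ALEN];

        if (valid_ether_addr(perm))
            memcpy(dev_addr, perm, ETH_ALEN);  /* adopt the permanent MAC */
        else
            printf("invalid MAC\n");
        return 0;
    }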
@@ -1980,7 +1981,6 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
1980 1981
1981 efx_stop_all(efx); 1982 efx_stop_all(efx);
1982 mutex_lock(&efx->mac_lock); 1983 mutex_lock(&efx->mac_lock);
1983 mutex_lock(&efx->spi_lock);
1984 1984
1985 efx_fini_channels(efx); 1985 efx_fini_channels(efx);
1986 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 1986 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2022,7 +2022,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2022 efx_init_channels(efx); 2022 efx_init_channels(efx);
2023 efx_restore_filters(efx); 2023 efx_restore_filters(efx);
2024 2024
2025 mutex_unlock(&efx->spi_lock);
2026 mutex_unlock(&efx->mac_lock); 2025 mutex_unlock(&efx->mac_lock);
2027 2026
2028 efx_start_all(efx); 2027 efx_start_all(efx);
@@ -2032,7 +2031,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2032fail: 2031fail:
2033 efx->port_initialized = false; 2032 efx->port_initialized = false;
2034 2033
2035 mutex_unlock(&efx->spi_lock);
2036 mutex_unlock(&efx->mac_lock); 2034 mutex_unlock(&efx->mac_lock);
2037 2035
2038 return rc; 2036 return rc;
@@ -2220,8 +2218,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2220 /* Initialise common structures */ 2218 /* Initialise common structures */
2221 memset(efx, 0, sizeof(*efx)); 2219 memset(efx, 0, sizeof(*efx));
2222 spin_lock_init(&efx->biu_lock); 2220 spin_lock_init(&efx->biu_lock);
2223 mutex_init(&efx->mdio_lock);
2224 mutex_init(&efx->spi_lock);
2225#ifdef CONFIG_SFC_MTD 2221#ifdef CONFIG_SFC_MTD
2226 INIT_LIST_HEAD(&efx->mtd_list); 2222 INIT_LIST_HEAD(&efx->mtd_list);
2227#endif 2223#endif
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index edb9d16b8b4..aae756bf47e 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -17,8 +17,6 @@
17#include "efx.h" 17#include "efx.h"
18#include "filter.h" 18#include "filter.h"
19#include "nic.h" 19#include "nic.h"
20#include "spi.h"
21#include "mdio_10g.h"
22 20
23struct ethtool_string { 21struct ethtool_string {
24 char name[ETH_GSTRING_LEN]; 22 char name[ETH_GSTRING_LEN];
@@ -629,61 +627,6 @@ static u32 efx_ethtool_get_link(struct net_device *net_dev)
629 return efx->link_state.up; 627 return efx->link_state.up;
630} 628}
631 629
632static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
633{
634 struct efx_nic *efx = netdev_priv(net_dev);
635 struct efx_spi_device *spi = efx->spi_eeprom;
636
637 if (!spi)
638 return 0;
639 return min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
640 min(spi->size, EFX_EEPROM_BOOTCONFIG_START);
641}
642
643static int efx_ethtool_get_eeprom(struct net_device *net_dev,
644 struct ethtool_eeprom *eeprom, u8 *buf)
645{
646 struct efx_nic *efx = netdev_priv(net_dev);
647 struct efx_spi_device *spi = efx->spi_eeprom;
648 size_t len;
649 int rc;
650
651 rc = mutex_lock_interruptible(&efx->spi_lock);
652 if (rc)
653 return rc;
654 rc = falcon_spi_read(efx, spi,
655 eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
656 eeprom->len, &len, buf);
657 mutex_unlock(&efx->spi_lock);
658
659 eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
660 eeprom->len = len;
661 return rc;
662}
663
664static int efx_ethtool_set_eeprom(struct net_device *net_dev,
665 struct ethtool_eeprom *eeprom, u8 *buf)
666{
667 struct efx_nic *efx = netdev_priv(net_dev);
668 struct efx_spi_device *spi = efx->spi_eeprom;
669 size_t len;
670 int rc;
671
672 if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
673 return -EINVAL;
674
675 rc = mutex_lock_interruptible(&efx->spi_lock);
676 if (rc)
677 return rc;
678 rc = falcon_spi_write(efx, spi,
679 eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
680 eeprom->len, &len, buf);
681 mutex_unlock(&efx->spi_lock);
682
683 eeprom->len = len;
684 return rc;
685}
686
687static int efx_ethtool_get_coalesce(struct net_device *net_dev, 630static int efx_ethtool_get_coalesce(struct net_device *net_dev,
688 struct ethtool_coalesce *coalesce) 631 struct ethtool_coalesce *coalesce)
689{ 632{
@@ -1116,9 +1059,6 @@ const struct ethtool_ops efx_ethtool_ops = {
1116 .set_msglevel = efx_ethtool_set_msglevel, 1059 .set_msglevel = efx_ethtool_set_msglevel,
1117 .nway_reset = efx_ethtool_nway_reset, 1060 .nway_reset = efx_ethtool_nway_reset,
1118 .get_link = efx_ethtool_get_link, 1061 .get_link = efx_ethtool_get_link,
1119 .get_eeprom_len = efx_ethtool_get_eeprom_len,
1120 .get_eeprom = efx_ethtool_get_eeprom,
1121 .set_eeprom = efx_ethtool_set_eeprom,
1122 .get_coalesce = efx_ethtool_get_coalesce, 1062 .get_coalesce = efx_ethtool_get_coalesce,
1123 .set_coalesce = efx_ethtool_set_coalesce, 1063 .set_coalesce = efx_ethtool_set_coalesce,
1124 .get_ringparam = efx_ethtool_get_ringparam, 1064 .get_ringparam = efx_ethtool_get_ringparam,
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 267019bb2b1..70e4f7dcce8 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -24,7 +24,6 @@
24#include "nic.h" 24#include "nic.h"
25#include "regs.h" 25#include "regs.h"
26#include "io.h" 26#include "io.h"
27#include "mdio_10g.h"
28#include "phy.h" 27#include "phy.h"
29#include "workarounds.h" 28#include "workarounds.h"
30 29
@@ -255,7 +254,6 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
255 /* Input validation */ 254 /* Input validation */
256 if (len > FALCON_SPI_MAX_LEN) 255 if (len > FALCON_SPI_MAX_LEN)
257 return -EINVAL; 256 return -EINVAL;
258 BUG_ON(!mutex_is_locked(&efx->spi_lock));
259 257
260 /* Check that previous command is not still running */ 258 /* Check that previous command is not still running */
261 rc = falcon_spi_poll(efx); 259 rc = falcon_spi_poll(efx);
@@ -719,6 +717,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
719 int prtad, int devad, u16 addr, u16 value) 717 int prtad, int devad, u16 addr, u16 value)
720{ 718{
721 struct efx_nic *efx = netdev_priv(net_dev); 719 struct efx_nic *efx = netdev_priv(net_dev);
720 struct falcon_nic_data *nic_data = efx->nic_data;
722 efx_oword_t reg; 721 efx_oword_t reg;
723 int rc; 722 int rc;
724 723
@@ -726,7 +725,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
726 "writing MDIO %d register %d.%d with 0x%04x\n", 725 "writing MDIO %d register %d.%d with 0x%04x\n",
727 prtad, devad, addr, value); 726 prtad, devad, addr, value);
728 727
729 mutex_lock(&efx->mdio_lock); 728 mutex_lock(&nic_data->mdio_lock);
730 729
731 /* Check MDIO not currently being accessed */ 730 /* Check MDIO not currently being accessed */
732 rc = falcon_gmii_wait(efx); 731 rc = falcon_gmii_wait(efx);
@@ -762,7 +761,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
762 } 761 }
763 762
764out: 763out:
765 mutex_unlock(&efx->mdio_lock); 764 mutex_unlock(&nic_data->mdio_lock);
766 return rc; 765 return rc;
767} 766}
768 767
@@ -771,10 +770,11 @@ static int falcon_mdio_read(struct net_device *net_dev,
771 int prtad, int devad, u16 addr) 770 int prtad, int devad, u16 addr)
772{ 771{
773 struct efx_nic *efx = netdev_priv(net_dev); 772 struct efx_nic *efx = netdev_priv(net_dev);
773 struct falcon_nic_data *nic_data = efx->nic_data;
774 efx_oword_t reg; 774 efx_oword_t reg;
775 int rc; 775 int rc;
776 776
777 mutex_lock(&efx->mdio_lock); 777 mutex_lock(&nic_data->mdio_lock);
778 778
779 /* Check MDIO not currently being accessed */ 779 /* Check MDIO not currently being accessed */
780 rc = falcon_gmii_wait(efx); 780 rc = falcon_gmii_wait(efx);
@@ -813,7 +813,7 @@ static int falcon_mdio_read(struct net_device *net_dev,
813 } 813 }
814 814
815out: 815out:
816 mutex_unlock(&efx->mdio_lock); 816 mutex_unlock(&nic_data->mdio_lock);
817 return rc; 817 return rc;
818} 818}
819 819
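The MDIO lock moves out of the generic struct efx_nic and into the Falcon-private falcon_nic_data (later hunks do the same for the SPI lock and the xmac_poll_required flag), which only the Falcon code uses. Sketch of that per-NIC-type private-state pattern, with pthread mutexes standing in for kernel mutexes:

    #include <pthread.h>

    struct nic {             /* generic, chip-independent state */
        void *nic_data;      /* points at chip-specific state */
    };

    struct falcon_data {     /* Falcon-only state */
        pthread_mutex_t mdio_lock;
        int xmac_poll_required;
    };

    static int falcon_mdio_op(struct nic *nic)
    {
        struct falcon_data *fd = nic->nic_data;
        int rc;

        pthread_mutex_lock(&fd->mdio_lock);
        rc = 0;              /* ... the actual MDIO access ... */
        pthread_mutex_unlock(&fd->mdio_lock);
        return rc;
    }

    int main(void)
    {
        struct falcon_data fd = { .xmac_poll_required = 0 };
        struct nic nic = { .nic_data = &fd };

        pthread_mutex_init(&fd.mdio_lock, NULL);
        return falcon_mdio_op(&nic);
    }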
@@ -841,6 +841,7 @@ static int falcon_probe_port(struct efx_nic *efx)
841 } 841 }
842 842
843 /* Fill out MDIO structure and loopback modes */ 843 /* Fill out MDIO structure and loopback modes */
844 mutex_init(&nic_data->mdio_lock);
844 efx->mdio.mdio_read = falcon_mdio_read; 845 efx->mdio.mdio_read = falcon_mdio_read;
845 efx->mdio.mdio_write = falcon_mdio_write; 846 efx->mdio.mdio_write = falcon_mdio_write;
846 rc = efx->phy_op->probe(efx); 847 rc = efx->phy_op->probe(efx);
@@ -880,6 +881,41 @@ static void falcon_remove_port(struct efx_nic *efx)
880 efx_nic_free_buffer(efx, &efx->stats_buffer); 881 efx_nic_free_buffer(efx, &efx->stats_buffer);
881} 882}
882 883
884/* Global events are basically PHY events */
885static bool
886falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
887{
888 struct efx_nic *efx = channel->efx;
889 struct falcon_nic_data *nic_data = efx->nic_data;
890
891 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
892 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
893 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
894 /* Ignored */
895 return true;
896
897 if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
898 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
899 nic_data->xmac_poll_required = true;
900 return true;
901 }
902
903 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
904 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
905 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
906 netif_err(efx, rx_err, efx->net_dev,
907 "channel %d seen global RX_RESET event. Resetting.\n",
908 channel->channel);
909
910 atomic_inc(&efx->rx_reset);
911 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
912 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
913 return true;
914 }
915
916 return false;
917}
918
883/************************************************************************** 919/**************************************************************************
884 * 920 *
885 * Falcon test code 921 * Falcon test code
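falcon_handle_global_event() is new and, in the efx_nic_type hunks further down, is installed as the .handle_global_event hook for both falcon_a1_nic_type and falcon_b0_nic_type, so PHY/global event decoding becomes a per-NIC-type operation. The common caller is not part of this hunk, so its exact behaviour is assumed; the sketch below only shows the dispatch pattern such a boolean "handled it" hook implies, with made-up event codes:

    #include <stdbool.h>
    #include <stdio.h>

    struct nic;

    struct nic_type {
        bool (*handle_global_event)(struct nic *nic, unsigned int event);
    };

    struct nic {
        const struct nic_type *type;
    };

    static bool falcon_global_event(struct nic *nic, unsigned int event)
    {
        (void)nic;
        return event == 1 || event == 2;  /* e.g. PHY interrupt, RX recovery */
    }

    static const struct nic_type falcon_type = {
        .handle_global_event = falcon_global_event,
    };

    static void handle_event(struct nic *nic, unsigned int event)
    {
        if (nic->type->handle_global_event &&
            nic->type->handle_global_event(nic, event))
            return;                       /* handled by the NIC type */
        printf("unknown global event %u\n", event);
    }

    int main(void)
    {
        struct nic nic = { .type = &falcon_type };

        handle_event(&nic, 1);
        handle_event(&nic, 9);
        return 0;
    }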
@@ -889,6 +925,7 @@ static void falcon_remove_port(struct efx_nic *efx)
889static int 925static int
890falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) 926falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
891{ 927{
928 struct falcon_nic_data *nic_data = efx->nic_data;
892 struct falcon_nvconfig *nvconfig; 929 struct falcon_nvconfig *nvconfig;
893 struct efx_spi_device *spi; 930 struct efx_spi_device *spi;
894 void *region; 931 void *region;
@@ -896,8 +933,11 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
896 __le16 *word, *limit; 933 __le16 *word, *limit;
897 u32 csum; 934 u32 csum;
898 935
899 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom; 936 if (efx_spi_present(&nic_data->spi_flash))
900 if (!spi) 937 spi = &nic_data->spi_flash;
938 else if (efx_spi_present(&nic_data->spi_eeprom))
939 spi = &nic_data->spi_eeprom;
940 else
901 return -EINVAL; 941 return -EINVAL;
902 942
903 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); 943 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
@@ -905,12 +945,13 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
905 return -ENOMEM; 945 return -ENOMEM;
906 nvconfig = region + FALCON_NVCONFIG_OFFSET; 946 nvconfig = region + FALCON_NVCONFIG_OFFSET;
907 947
908 mutex_lock(&efx->spi_lock); 948 mutex_lock(&nic_data->spi_lock);
909 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); 949 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
910 mutex_unlock(&efx->spi_lock); 950 mutex_unlock(&nic_data->spi_lock);
911 if (rc) { 951 if (rc) {
912 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n", 952 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
913 efx->spi_flash ? "flash" : "EEPROM"); 953 efx_spi_present(&nic_data->spi_flash) ?
954 "flash" : "EEPROM");
914 rc = -EIO; 955 rc = -EIO;
915 goto out; 956 goto out;
916 } 957 }
@@ -1012,7 +1053,7 @@ static int falcon_b0_test_registers(struct efx_nic *efx)
1012 1053
1013/* Resets NIC to known state. This routine must be called in process 1054/* Resets NIC to known state. This routine must be called in process
1014 * context and is allowed to sleep. */ 1055 * context and is allowed to sleep. */
1015static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) 1056static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1016{ 1057{
1017 struct falcon_nic_data *nic_data = efx->nic_data; 1058 struct falcon_nic_data *nic_data = efx->nic_data;
1018 efx_oword_t glb_ctl_reg_ker; 1059 efx_oword_t glb_ctl_reg_ker;
@@ -1108,6 +1149,18 @@ fail5:
1108 return rc; 1149 return rc;
1109} 1150}
1110 1151
1152static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1153{
1154 struct falcon_nic_data *nic_data = efx->nic_data;
1155 int rc;
1156
1157 mutex_lock(&nic_data->spi_lock);
1158 rc = __falcon_reset_hw(efx, method);
1159 mutex_unlock(&nic_data->spi_lock);
1160
1161 return rc;
1162}
1163
1111static void falcon_monitor(struct efx_nic *efx) 1164static void falcon_monitor(struct efx_nic *efx)
1112{ 1165{
1113 bool link_changed; 1166 bool link_changed;
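falcon_reset_hw() is split into __falcon_reset_hw(), the raw reset sequence, plus a wrapper that takes nic_data->spi_lock around it; probe and remove keep calling the double-underscore variant directly (see the later hunks in this file). Given that the MTD and NVRAM paths below take the same lock around SPI transfers, the point appears to be that a reset cannot race with in-flight flash/EEPROM access. Pattern sketch with a pthread mutex:

    #include <pthread.h>

    static pthread_mutex_t spi_lock = PTHREAD_MUTEX_INITIALIZER;

    static int __reset_hw(int method)
    {
        (void)method;
        return 0;            /* ... the actual reset sequence ... */
    }

    static int reset_hw(int method)
    {
        int rc;

        pthread_mutex_lock(&spi_lock);   /* exclude SPI users during reset */
        rc = __reset_hw(method);
        pthread_mutex_unlock(&spi_lock);
        return rc;
    }

    int main(void)
    {
        return reset_hw(0);
    }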
@@ -1189,16 +1242,11 @@ static int falcon_reset_sram(struct efx_nic *efx)
1189 return -ETIMEDOUT; 1242 return -ETIMEDOUT;
1190} 1243}
1191 1244
1192static int falcon_spi_device_init(struct efx_nic *efx, 1245static void falcon_spi_device_init(struct efx_nic *efx,
1193 struct efx_spi_device **spi_device_ret, 1246 struct efx_spi_device *spi_device,
1194 unsigned int device_id, u32 device_type) 1247 unsigned int device_id, u32 device_type)
1195{ 1248{
1196 struct efx_spi_device *spi_device;
1197
1198 if (device_type != 0) { 1249 if (device_type != 0) {
1199 spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
1200 if (!spi_device)
1201 return -ENOMEM;
1202 spi_device->device_id = device_id; 1250 spi_device->device_id = device_id;
1203 spi_device->size = 1251 spi_device->size =
1204 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE); 1252 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
@@ -1215,27 +1263,15 @@ static int falcon_spi_device_init(struct efx_nic *efx,
1215 1 << SPI_DEV_TYPE_FIELD(device_type, 1263 1 << SPI_DEV_TYPE_FIELD(device_type,
1216 SPI_DEV_TYPE_BLOCK_SIZE); 1264 SPI_DEV_TYPE_BLOCK_SIZE);
1217 } else { 1265 } else {
1218 spi_device = NULL; 1266 spi_device->size = 0;
1219 } 1267 }
1220
1221 kfree(*spi_device_ret);
1222 *spi_device_ret = spi_device;
1223 return 0;
1224}
1225
1226static void falcon_remove_spi_devices(struct efx_nic *efx)
1227{
1228 kfree(efx->spi_eeprom);
1229 efx->spi_eeprom = NULL;
1230 kfree(efx->spi_flash);
1231 efx->spi_flash = NULL;
1232} 1268}
1233 1269
1234/* Extract non-volatile configuration */ 1270/* Extract non-volatile configuration */
1235static int falcon_probe_nvconfig(struct efx_nic *efx) 1271static int falcon_probe_nvconfig(struct efx_nic *efx)
1236{ 1272{
1273 struct falcon_nic_data *nic_data = efx->nic_data;
1237 struct falcon_nvconfig *nvconfig; 1274 struct falcon_nvconfig *nvconfig;
1238 int board_rev;
1239 int rc; 1275 int rc;
1240 1276
1241 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); 1277 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
@@ -1243,55 +1279,32 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1243 return -ENOMEM; 1279 return -ENOMEM;
1244 1280
1245 rc = falcon_read_nvram(efx, nvconfig); 1281 rc = falcon_read_nvram(efx, nvconfig);
1246 if (rc == -EINVAL) { 1282 if (rc)
1247 netif_err(efx, probe, efx->net_dev, 1283 goto out;
1248 "NVRAM is invalid therefore using defaults\n"); 1284
1249 efx->phy_type = PHY_TYPE_NONE; 1285 efx->phy_type = nvconfig->board_v2.port0_phy_type;
1250 efx->mdio.prtad = MDIO_PRTAD_NONE; 1286 efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
1251 board_rev = 0; 1287
1252 rc = 0; 1288 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1253 } else if (rc) { 1289 falcon_spi_device_init(
1254 goto fail1; 1290 efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
1255 } else { 1291 le32_to_cpu(nvconfig->board_v3
1256 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2; 1292 .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
1257 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3; 1293 falcon_spi_device_init(
1258 1294 efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
1259 efx->phy_type = v2->port0_phy_type; 1295 le32_to_cpu(nvconfig->board_v3
1260 efx->mdio.prtad = v2->port0_phy_addr; 1296 .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
1261 board_rev = le16_to_cpu(v2->board_revision);
1262
1263 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1264 rc = falcon_spi_device_init(
1265 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
1266 le32_to_cpu(v3->spi_device_type
1267 [FFE_AB_SPI_DEVICE_FLASH]));
1268 if (rc)
1269 goto fail2;
1270 rc = falcon_spi_device_init(
1271 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
1272 le32_to_cpu(v3->spi_device_type
1273 [FFE_AB_SPI_DEVICE_EEPROM]));
1274 if (rc)
1275 goto fail2;
1276 }
1277 } 1297 }
1278 1298
1279 /* Read the MAC addresses */ 1299 /* Read the MAC addresses */
1280 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN); 1300 memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
1281 1301
1282 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", 1302 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
1283 efx->phy_type, efx->mdio.prtad); 1303 efx->phy_type, efx->mdio.prtad);
1284 1304
1285 rc = falcon_probe_board(efx, board_rev); 1305 rc = falcon_probe_board(efx,
1286 if (rc) 1306 le16_to_cpu(nvconfig->board_v2.board_revision));
1287 goto fail2; 1307out:
1288
1289 kfree(nvconfig);
1290 return 0;
1291
1292 fail2:
1293 falcon_remove_spi_devices(efx);
1294 fail1:
1295 kfree(nvconfig); 1308 kfree(nvconfig);
1296 return rc; 1309 return rc;
1297} 1310}
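falcon_probe_nvconfig() is reworked around the embedded SPI descriptors: falcon_spi_device_init() now fills a structure inside falcon_nic_data instead of allocating one (so it cannot fail, and "no device" is recorded in the descriptor itself; the real test is efx_spi_present(), whose definition is not in this hunk), invalid NVRAM is reported as an error by the caller rather than silently replaced with defaults, and the permanent MAC is copied straight into net_dev->perm_addr. Sketch of the embedded-descriptor idea; the size decoding is simplified and the type word is made up:

    #include <stdbool.h>
    #include <stdint.h>

    struct spi_device_desc {
        unsigned int device_id;
        uint32_t size;       /* 0 is used here to mean "not fitted" */
    };

    struct falcon_nvm_state {
        struct spi_device_desc spi_flash;
        struct spi_device_desc spi_eeprom;
    };

    static bool spi_present(const struct spi_device_desc *spi)
    {
        return spi->size != 0;
    }

    static void spi_device_init(struct spi_device_desc *spi,
                                unsigned int id, uint32_t device_type)
    {
        spi->device_id = id;
        spi->size = device_type ? 1u << (device_type & 0x1f) : 0;
    }

    int main(void)
    {
        struct falcon_nvm_state nvm = { { 0, 0 }, { 0, 0 } };

        spi_device_init(&nvm.spi_flash, 1, 0x18);  /* hypothetical type word */
        spi_device_init(&nvm.spi_eeprom, 2, 0);    /* no EEPROM fitted */
        return spi_present(&nvm.spi_flash) &&
               !spi_present(&nvm.spi_eeprom) ? 0 : 1;
    }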
@@ -1299,6 +1312,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1299/* Probe all SPI devices on the NIC */ 1312/* Probe all SPI devices on the NIC */
1300static void falcon_probe_spi_devices(struct efx_nic *efx) 1313static void falcon_probe_spi_devices(struct efx_nic *efx)
1301{ 1314{
1315 struct falcon_nic_data *nic_data = efx->nic_data;
1302 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; 1316 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
1303 int boot_dev; 1317 int boot_dev;
1304 1318
@@ -1327,12 +1341,14 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
1327 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); 1341 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1328 } 1342 }
1329 1343
1344 mutex_init(&nic_data->spi_lock);
1345
1330 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH) 1346 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
1331 falcon_spi_device_init(efx, &efx->spi_flash, 1347 falcon_spi_device_init(efx, &nic_data->spi_flash,
1332 FFE_AB_SPI_DEVICE_FLASH, 1348 FFE_AB_SPI_DEVICE_FLASH,
1333 default_flash_type); 1349 default_flash_type);
1334 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM) 1350 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
1335 falcon_spi_device_init(efx, &efx->spi_eeprom, 1351 falcon_spi_device_init(efx, &nic_data->spi_eeprom,
1336 FFE_AB_SPI_DEVICE_EEPROM, 1352 FFE_AB_SPI_DEVICE_EEPROM,
1337 large_eeprom_type); 1353 large_eeprom_type);
1338} 1354}
@@ -1397,7 +1413,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
1397 } 1413 }
1398 1414
1399 /* Now we can reset the NIC */ 1415 /* Now we can reset the NIC */
1400 rc = falcon_reset_hw(efx, RESET_TYPE_ALL); 1416 rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
1401 if (rc) { 1417 if (rc) {
1402 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); 1418 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
1403 goto fail3; 1419 goto fail3;
@@ -1419,8 +1435,11 @@ static int falcon_probe_nic(struct efx_nic *efx)
1419 1435
1420 /* Read in the non-volatile configuration */ 1436 /* Read in the non-volatile configuration */
1421 rc = falcon_probe_nvconfig(efx); 1437 rc = falcon_probe_nvconfig(efx);
1422 if (rc) 1438 if (rc) {
1439 if (rc == -EINVAL)
1440 netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
1423 goto fail5; 1441 goto fail5;
1442 }
1424 1443
1425 /* Initialise I2C adapter */ 1444 /* Initialise I2C adapter */
1426 board = falcon_board(efx); 1445 board = falcon_board(efx);
@@ -1452,7 +1471,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
1452 BUG_ON(i2c_del_adapter(&board->i2c_adap)); 1471 BUG_ON(i2c_del_adapter(&board->i2c_adap));
1453 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); 1472 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1454 fail5: 1473 fail5:
1455 falcon_remove_spi_devices(efx);
1456 efx_nic_free_buffer(efx, &efx->irq_status); 1474 efx_nic_free_buffer(efx, &efx->irq_status);
1457 fail4: 1475 fail4:
1458 fail3: 1476 fail3:
@@ -1606,10 +1624,9 @@ static void falcon_remove_nic(struct efx_nic *efx)
1606 BUG_ON(rc); 1624 BUG_ON(rc);
1607 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); 1625 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1608 1626
1609 falcon_remove_spi_devices(efx);
1610 efx_nic_free_buffer(efx, &efx->irq_status); 1627 efx_nic_free_buffer(efx, &efx->irq_status);
1611 1628
1612 falcon_reset_hw(efx, RESET_TYPE_ALL); 1629 __falcon_reset_hw(efx, RESET_TYPE_ALL);
1613 1630
1614 /* Release the second function after the reset */ 1631 /* Release the second function after the reset */
1615 if (nic_data->pci_dev2) { 1632 if (nic_data->pci_dev2) {
@@ -1720,6 +1737,7 @@ struct efx_nic_type falcon_a1_nic_type = {
1720 .reset = falcon_reset_hw, 1737 .reset = falcon_reset_hw,
1721 .probe_port = falcon_probe_port, 1738 .probe_port = falcon_probe_port,
1722 .remove_port = falcon_remove_port, 1739 .remove_port = falcon_remove_port,
1740 .handle_global_event = falcon_handle_global_event,
1723 .prepare_flush = falcon_prepare_flush, 1741 .prepare_flush = falcon_prepare_flush,
1724 .update_stats = falcon_update_nic_stats, 1742 .update_stats = falcon_update_nic_stats,
1725 .start_stats = falcon_start_nic_stats, 1743 .start_stats = falcon_start_nic_stats,
@@ -1760,6 +1778,7 @@ struct efx_nic_type falcon_b0_nic_type = {
1760 .reset = falcon_reset_hw, 1778 .reset = falcon_reset_hw,
1761 .probe_port = falcon_probe_port, 1779 .probe_port = falcon_probe_port,
1762 .remove_port = falcon_remove_port, 1780 .remove_port = falcon_remove_port,
1781 .handle_global_event = falcon_handle_global_event,
1763 .prepare_flush = falcon_prepare_flush, 1782 .prepare_flush = falcon_prepare_flush,
1764 .update_stats = falcon_update_nic_stats, 1783 .update_stats = falcon_update_nic_stats,
1765 .start_stats = falcon_start_nic_stats, 1784 .start_stats = falcon_start_nic_stats,
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index cfc6a5b5a47..2dd16f0b3ce 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -13,8 +13,6 @@
13#include "phy.h" 13#include "phy.h"
14#include "efx.h" 14#include "efx.h"
15#include "nic.h" 15#include "nic.h"
16#include "regs.h"
17#include "io.h"
18#include "workarounds.h" 16#include "workarounds.h"
19 17
20/* Macros for unpacking the board revision */ 18/* Macros for unpacking the board revision */
@@ -30,17 +28,28 @@
30#define FALCON_BOARD_SFN4112F 0x52 28#define FALCON_BOARD_SFN4112F 0x52
31 29
32/* Board temperature is about 15°C above ambient when air flow is 30/* Board temperature is about 15°C above ambient when air flow is
33 * limited. */ 31 * limited. The maximum acceptable ambient temperature varies
32 * depending on the PHY specifications but the critical temperature
33 * above which we should shut down to avoid damage is 80°C. */
34#define FALCON_BOARD_TEMP_BIAS 15 34#define FALCON_BOARD_TEMP_BIAS 15
35#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS)
35 36
36/* SFC4000 datasheet says: 'The maximum permitted junction temperature 37/* SFC4000 datasheet says: 'The maximum permitted junction temperature
37 * is 125°C; the thermal design of the environment for the SFC4000 38 * is 125°C; the thermal design of the environment for the SFC4000
38 * should aim to keep this well below 100°C.' */ 39 * should aim to keep this well below 100°C.' */
40#define FALCON_JUNC_TEMP_MIN 0
39#define FALCON_JUNC_TEMP_MAX 90 41#define FALCON_JUNC_TEMP_MAX 90
42#define FALCON_JUNC_TEMP_CRIT 125
40 43
41/***************************************************************************** 44/*****************************************************************************
42 * Support for LM87 sensor chip used on several boards 45 * Support for LM87 sensor chip used on several boards
43 */ 46 */
47#define LM87_REG_TEMP_HW_INT_LOCK 0x13
48#define LM87_REG_TEMP_HW_EXT_LOCK 0x14
49#define LM87_REG_TEMP_HW_INT 0x17
50#define LM87_REG_TEMP_HW_EXT 0x18
51#define LM87_REG_TEMP_EXT1 0x26
52#define LM87_REG_TEMP_INT 0x27
44#define LM87_REG_ALARMS1 0x41 53#define LM87_REG_ALARMS1 0x41
45#define LM87_REG_ALARMS2 0x42 54#define LM87_REG_ALARMS2 0x42
46#define LM87_IN_LIMITS(nr, _min, _max) \ 55#define LM87_IN_LIMITS(nr, _min, _max) \
@@ -57,6 +66,27 @@
57 66
58#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE) 67#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
59 68
69static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
70{
71 while (*reg_values) {
72 u8 reg = *reg_values++;
73 u8 value = *reg_values++;
74 int rc = i2c_smbus_write_byte_data(client, reg, value);
75 if (rc)
76 return rc;
77 }
78 return 0;
79}
80
81static const u8 falcon_lm87_common_regs[] = {
82 LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
83 LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
84 LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
85 LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
86 LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
87 0
88};
89
60static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, 90static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
61 const u8 *reg_values) 91 const u8 *reg_values)
62{ 92{
@@ -67,13 +97,16 @@ static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
67 if (!client) 97 if (!client)
68 return -EIO; 98 return -EIO;
69 99
70 while (*reg_values) { 100 /* Read-to-clear alarm/interrupt status */
71 u8 reg = *reg_values++; 101 i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
72 u8 value = *reg_values++; 102 i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
73 rc = i2c_smbus_write_byte_data(client, reg, value); 103
74 if (rc) 104 rc = efx_poke_lm87(client, reg_values);
75 goto err; 105 if (rc)
76 } 106 goto err;
107 rc = efx_poke_lm87(client, falcon_lm87_common_regs);
108 if (rc)
109 goto err;
77 110
78 board->hwmon_client = client; 111 board->hwmon_client = client;
79 return 0; 112 return 0;
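efx_init_lm87() now read-clears the two alarm status registers before programming, and the register writes are factored into efx_poke_lm87(), which walks a zero-terminated list of (register, value) pairs; the shared falcon_lm87_common_regs table uses it to set the chip's hardware shutdown limits (board critical 80°C plus the 15°C bias, junction critical 125°C). Stand-alone sketch of that pair-list helper, with the SMBus write stubbed out:

    #include <stdint.h>
    #include <stdio.h>

    static int smbus_write(uint8_t reg, uint8_t value)  /* stub for the I2C write */
    {
        printf("write reg 0x%02x = 0x%02x\n", reg, value);
        return 0;
    }

    static int poke_regs(const uint8_t *reg_values)
    {
        while (*reg_values) {
            uint8_t reg = *reg_values++;
            uint8_t value = *reg_values++;
            int rc = smbus_write(reg, value);

            if (rc)
                return rc;
        }
        return 0;
    }

    int main(void)
    {
        /* critical limits, register numbers and values as in the table above */
        static const uint8_t limits[] = {
            0x13, 95, 0x17, 95,    /* board (internal) lock + limit */
            0x14, 125, 0x18, 125,  /* junction (external) lock + limit */
            0
        };

        return poke_regs(limits);
    }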
@@ -91,36 +124,56 @@ static void efx_fini_lm87(struct efx_nic *efx)
91static int efx_check_lm87(struct efx_nic *efx, unsigned mask) 124static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
92{ 125{
93 struct i2c_client *client = falcon_board(efx)->hwmon_client; 126 struct i2c_client *client = falcon_board(efx)->hwmon_client;
94 s32 alarms1, alarms2; 127 bool temp_crit, elec_fault, is_failure;
128 u16 alarms;
129 s32 reg;
95 130
96 /* If link is up then do not monitor temperature */ 131 /* If link is up then do not monitor temperature */
97 if (EFX_WORKAROUND_7884(efx) && efx->link_state.up) 132 if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
98 return 0; 133 return 0;
99 134
100 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); 135 reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
101 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); 136 if (reg < 0)
102 if (alarms1 < 0) 137 return reg;
103 return alarms1; 138 alarms = reg;
104 if (alarms2 < 0) 139 reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
105 return alarms2; 140 if (reg < 0)
106 alarms1 &= mask; 141 return reg;
107 alarms2 &= mask >> 8; 142 alarms |= reg << 8;
108 if (alarms1 || alarms2) { 143 alarms &= mask;
144
145 temp_crit = false;
146 if (alarms & LM87_ALARM_TEMP_INT) {
147 reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
148 if (reg < 0)
149 return reg;
150 if (reg > FALCON_BOARD_TEMP_CRIT)
151 temp_crit = true;
152 }
153 if (alarms & LM87_ALARM_TEMP_EXT1) {
154 reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
155 if (reg < 0)
156 return reg;
157 if (reg > FALCON_JUNC_TEMP_CRIT)
158 temp_crit = true;
159 }
160 elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
161 is_failure = temp_crit || elec_fault;
162
163 if (alarms)
109 netif_err(efx, hw, efx->net_dev, 164 netif_err(efx, hw, efx->net_dev,
110 "LM87 detected a hardware failure (status %02x:%02x)" 165 "LM87 detected a hardware %s (status %02x:%02x)"
111 "%s%s%s\n", 166 "%s%s%s%s\n",
112 alarms1, alarms2, 167 is_failure ? "failure" : "problem",
113 (alarms1 & LM87_ALARM_TEMP_INT) ? 168 alarms & 0xff, alarms >> 8,
169 (alarms & LM87_ALARM_TEMP_INT) ?
114 "; board is overheating" : "", 170 "; board is overheating" : "",
115 (alarms1 & LM87_ALARM_TEMP_EXT1) ? 171 (alarms & LM87_ALARM_TEMP_EXT1) ?
116 "; controller is overheating" : "", 172 "; controller is overheating" : "",
117 (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1) 173 temp_crit ? "; reached critical temperature" : "",
118 || alarms2) ? 174 elec_fault ? "; electrical fault" : "");
119 "; electrical fault" : "");
120 return -ERANGE;
121 }
122 175
123 return 0; 176 return is_failure ? -ERANGE : 0;
124} 177}
125 178
126#else /* !CONFIG_SENSORS_LM87 */ 179#else /* !CONFIG_SENSORS_LM87 */
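efx_check_lm87() now separates a "problem" from a "failure": the two alarm registers are merged into one 16-bit word and masked, the board and controller temperatures are read back only when their alarm bits are set, and -ERANGE is returned only when a temperature exceeds its critical threshold or a non-temperature (electrical) alarm fired; otherwise the condition is only logged. Simplified sketch of that classification, with register access replaced by plain parameters and illustrative alarm bit values:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ALARM_TEMP_INT   0x10   /* illustrative bit positions */
    #define ALARM_TEMP_EXT1  0x20
    #define BOARD_TEMP_CRIT  95
    #define JUNC_TEMP_CRIT   125

    static int check_lm87(uint16_t alarms, int board_temp, int junc_temp,
                          uint16_t mask)
    {
        bool temp_crit = false, elec_fault, is_failure;

        alarms &= mask;
        if ((alarms & ALARM_TEMP_INT) && board_temp > BOARD_TEMP_CRIT)
            temp_crit = true;
        if ((alarms & ALARM_TEMP_EXT1) && junc_temp > JUNC_TEMP_CRIT)
            temp_crit = true;
        elec_fault = alarms & ~(ALARM_TEMP_INT | ALARM_TEMP_EXT1);
        is_failure = temp_crit || elec_fault;

        if (alarms)
            fprintf(stderr, "LM87 detected a hardware %s (status %04x)\n",
                    is_failure ? "failure" : "problem", (unsigned)alarms);

        return is_failure ? -ERANGE : 0;
    }

    int main(void)
    {
        /* temperature alarm set but still below critical: logged as a
         * problem, not treated as a failure */
        return check_lm87(ALARM_TEMP_INT, 90, 80, 0xffff) ? 1 : 0;
    }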
@@ -325,7 +378,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
325 new_mode = old_mode & ~PHY_MODE_SPECIAL; 378 new_mode = old_mode & ~PHY_MODE_SPECIAL;
326 else 379 else
327 new_mode = PHY_MODE_SPECIAL; 380 new_mode = PHY_MODE_SPECIAL;
328 if (old_mode == new_mode) { 381 if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
329 err = 0; 382 err = 0;
330 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { 383 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
331 err = -EBUSY; 384 err = -EBUSY;
@@ -362,10 +415,11 @@ static void sfe4001_fini(struct efx_nic *efx)
362 415
363static int sfe4001_check_hw(struct efx_nic *efx) 416static int sfe4001_check_hw(struct efx_nic *efx)
364{ 417{
418 struct falcon_nic_data *nic_data = efx->nic_data;
365 s32 status; 419 s32 status;
366 420
367 /* If XAUI link is up then do not monitor */ 421 /* If XAUI link is up then do not monitor */
368 if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required) 422 if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
369 return 0; 423 return 0;
370 424
371 /* Check the powered status of the PHY. Lack of power implies that 425 /* Check the powered status of the PHY. Lack of power implies that
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index b31f595ebb5..b49e8439464 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -16,7 +16,6 @@
16#include "io.h" 16#include "io.h"
17#include "mac.h" 17#include "mac.h"
18#include "mdio_10g.h" 18#include "mdio_10g.h"
19#include "phy.h"
20#include "workarounds.h" 19#include "workarounds.h"
21 20
22/************************************************************************** 21/**************************************************************************
@@ -88,6 +87,7 @@ int falcon_reset_xaui(struct efx_nic *efx)
88 87
89static void falcon_ack_status_intr(struct efx_nic *efx) 88static void falcon_ack_status_intr(struct efx_nic *efx)
90{ 89{
90 struct falcon_nic_data *nic_data = efx->nic_data;
91 efx_oword_t reg; 91 efx_oword_t reg;
92 92
93 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) 93 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
@@ -99,7 +99,7 @@ static void falcon_ack_status_intr(struct efx_nic *efx)
99 99
100 /* We can only use this interrupt to signal the negative edge of 100 /* We can only use this interrupt to signal the negative edge of
101 * xaui_align [we have to poll the positive edge]. */ 101 * xaui_align [we have to poll the positive edge]. */
102 if (efx->xmac_poll_required) 102 if (nic_data->xmac_poll_required)
103 return; 103 return;
104 104
105 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK); 105 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
@@ -277,12 +277,14 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
277 277
278static int falcon_reconfigure_xmac(struct efx_nic *efx) 278static int falcon_reconfigure_xmac(struct efx_nic *efx)
279{ 279{
280 struct falcon_nic_data *nic_data = efx->nic_data;
281
280 falcon_reconfigure_xgxs_core(efx); 282 falcon_reconfigure_xgxs_core(efx);
281 falcon_reconfigure_xmac_core(efx); 283 falcon_reconfigure_xmac_core(efx);
282 284
283 falcon_reconfigure_mac_wrapper(efx); 285 falcon_reconfigure_mac_wrapper(efx);
284 286
285 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); 287 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
286 falcon_ack_status_intr(efx); 288 falcon_ack_status_intr(efx);
287 289
288 return 0; 290 return 0;
@@ -350,11 +352,13 @@ static void falcon_update_stats_xmac(struct efx_nic *efx)
350 352
351void falcon_poll_xmac(struct efx_nic *efx) 353void falcon_poll_xmac(struct efx_nic *efx)
352{ 354{
355 struct falcon_nic_data *nic_data = efx->nic_data;
356
353 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up || 357 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
354 !efx->xmac_poll_required) 358 !nic_data->xmac_poll_required)
355 return; 359 return;
356 360
357 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); 361 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
358 falcon_ack_status_intr(efx); 362 falcon_ack_status_intr(efx);
359} 363}
360 364
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index 52cb6082b91..44500b54fd5 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -428,10 +428,9 @@ int efx_probe_filters(struct efx_nic *efx)
428 GFP_KERNEL); 428 GFP_KERNEL);
429 if (!table->used_bitmap) 429 if (!table->used_bitmap)
430 goto fail; 430 goto fail;
431 table->spec = vmalloc(table->size * sizeof(*table->spec)); 431 table->spec = vzalloc(table->size * sizeof(*table->spec));
432 if (!table->spec) 432 if (!table->spec)
433 goto fail; 433 goto fail;
434 memset(table->spec, 0, table->size * sizeof(*table->spec));
435 } 434 }
436 435
437 return 0; 436 return 0;
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 12cf910c2ce..b716e827b29 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -381,7 +381,7 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
381 -rc); 381 -rc);
382 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 382 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
383 } else 383 } else
384 netif_err(efx, hw, efx->net_dev, 384 netif_dbg(efx, hw, efx->net_dev,
385 "MC command 0x%x inlen %d failed rc=%d\n", 385 "MC command 0x%x inlen %d failed rc=%d\n",
386 cmd, (int)inlen, -rc); 386 cmd, (int)inlen, -rc);
387 } 387 }
@@ -463,6 +463,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
463 if (mcdi->mode == MCDI_MODE_EVENTS) { 463 if (mcdi->mode == MCDI_MODE_EVENTS) {
464 mcdi->resprc = rc; 464 mcdi->resprc = rc;
465 mcdi->resplen = 0; 465 mcdi->resplen = 0;
466 ++mcdi->credits;
466 } 467 }
467 } else 468 } else
468 /* Nobody was waiting for an MCDI request, so trigger a reset */ 469 /* Nobody was waiting for an MCDI request, so trigger a reset */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index c992742446b..0e97eed663c 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -16,7 +16,6 @@
16#include "phy.h" 16#include "phy.h"
17#include "mcdi.h" 17#include "mcdi.h"
18#include "mcdi_pcol.h" 18#include "mcdi_pcol.h"
19#include "mdio_10g.h"
20#include "nic.h" 19#include "nic.h"
21#include "selftest.h" 20#include "selftest.h"
22 21
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 98d94602042..56b0266b441 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -15,7 +15,6 @@
15#include "net_driver.h" 15#include "net_driver.h"
16#include "mdio_10g.h" 16#include "mdio_10g.h"
17#include "workarounds.h" 17#include "workarounds.h"
18#include "nic.h"
19 18
20unsigned efx_mdio_id_oui(u32 id) 19unsigned efx_mdio_id_oui(u32 id)
21{ 20{
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 02e54b4f701..d38627448c2 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -321,14 +321,15 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
321 struct efx_mtd *efx_mtd = mtd->priv; 321 struct efx_mtd *efx_mtd = mtd->priv;
322 const struct efx_spi_device *spi = efx_mtd->spi; 322 const struct efx_spi_device *spi = efx_mtd->spi;
323 struct efx_nic *efx = efx_mtd->efx; 323 struct efx_nic *efx = efx_mtd->efx;
324 struct falcon_nic_data *nic_data = efx->nic_data;
324 int rc; 325 int rc;
325 326
326 rc = mutex_lock_interruptible(&efx->spi_lock); 327 rc = mutex_lock_interruptible(&nic_data->spi_lock);
327 if (rc) 328 if (rc)
328 return rc; 329 return rc;
329 rc = falcon_spi_read(efx, spi, part->offset + start, len, 330 rc = falcon_spi_read(efx, spi, part->offset + start, len,
330 retlen, buffer); 331 retlen, buffer);
331 mutex_unlock(&efx->spi_lock); 332 mutex_unlock(&nic_data->spi_lock);
332 return rc; 333 return rc;
333} 334}
334 335
@@ -337,13 +338,14 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
337 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); 338 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
338 struct efx_mtd *efx_mtd = mtd->priv; 339 struct efx_mtd *efx_mtd = mtd->priv;
339 struct efx_nic *efx = efx_mtd->efx; 340 struct efx_nic *efx = efx_mtd->efx;
341 struct falcon_nic_data *nic_data = efx->nic_data;
340 int rc; 342 int rc;
341 343
342 rc = mutex_lock_interruptible(&efx->spi_lock); 344 rc = mutex_lock_interruptible(&nic_data->spi_lock);
343 if (rc) 345 if (rc)
344 return rc; 346 return rc;
345 rc = efx_spi_erase(part, part->offset + start, len); 347 rc = efx_spi_erase(part, part->offset + start, len);
346 mutex_unlock(&efx->spi_lock); 348 mutex_unlock(&nic_data->spi_lock);
347 return rc; 349 return rc;
348} 350}
349 351
@@ -354,14 +356,15 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
354 struct efx_mtd *efx_mtd = mtd->priv; 356 struct efx_mtd *efx_mtd = mtd->priv;
355 const struct efx_spi_device *spi = efx_mtd->spi; 357 const struct efx_spi_device *spi = efx_mtd->spi;
356 struct efx_nic *efx = efx_mtd->efx; 358 struct efx_nic *efx = efx_mtd->efx;
359 struct falcon_nic_data *nic_data = efx->nic_data;
357 int rc; 360 int rc;
358 361
359 rc = mutex_lock_interruptible(&efx->spi_lock); 362 rc = mutex_lock_interruptible(&nic_data->spi_lock);
360 if (rc) 363 if (rc)
361 return rc; 364 return rc;
362 rc = falcon_spi_write(efx, spi, part->offset + start, len, 365 rc = falcon_spi_write(efx, spi, part->offset + start, len,
363 retlen, buffer); 366 retlen, buffer);
364 mutex_unlock(&efx->spi_lock); 367 mutex_unlock(&nic_data->spi_lock);
365 return rc; 368 return rc;
366} 369}
367 370
@@ -370,11 +373,12 @@ static int falcon_mtd_sync(struct mtd_info *mtd)
370 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); 373 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
371 struct efx_mtd *efx_mtd = mtd->priv; 374 struct efx_mtd *efx_mtd = mtd->priv;
372 struct efx_nic *efx = efx_mtd->efx; 375 struct efx_nic *efx = efx_mtd->efx;
376 struct falcon_nic_data *nic_data = efx->nic_data;
373 int rc; 377 int rc;
374 378
375 mutex_lock(&efx->spi_lock); 379 mutex_lock(&nic_data->spi_lock);
376 rc = efx_spi_slow_wait(part, true); 380 rc = efx_spi_slow_wait(part, true);
377 mutex_unlock(&efx->spi_lock); 381 mutex_unlock(&nic_data->spi_lock);
378 return rc; 382 return rc;
379} 383}
380 384
@@ -387,35 +391,67 @@ static struct efx_mtd_ops falcon_mtd_ops = {
387 391
388static int falcon_mtd_probe(struct efx_nic *efx) 392static int falcon_mtd_probe(struct efx_nic *efx)
389{ 393{
390 struct efx_spi_device *spi = efx->spi_flash; 394 struct falcon_nic_data *nic_data = efx->nic_data;
395 struct efx_spi_device *spi;
391 struct efx_mtd *efx_mtd; 396 struct efx_mtd *efx_mtd;
392 int rc; 397 int rc = -ENODEV;
393 398
394 ASSERT_RTNL(); 399 ASSERT_RTNL();
395 400
396 if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START) 401 spi = &nic_data->spi_flash;
397 return -ENODEV; 402 if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
398 403 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
399 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]), 404 GFP_KERNEL);
400 GFP_KERNEL); 405 if (!efx_mtd)
401 if (!efx_mtd) 406 return -ENOMEM;
402 return -ENOMEM; 407
403 408 efx_mtd->spi = spi;
404 efx_mtd->spi = spi; 409 efx_mtd->name = "flash";
405 efx_mtd->name = "flash"; 410 efx_mtd->ops = &falcon_mtd_ops;
406 efx_mtd->ops = &falcon_mtd_ops; 411
412 efx_mtd->n_parts = 1;
413 efx_mtd->part[0].mtd.type = MTD_NORFLASH;
414 efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
415 efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
416 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
417 efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
418 efx_mtd->part[0].type_name = "sfc_flash_bootrom";
419
420 rc = efx_mtd_probe_device(efx, efx_mtd);
421 if (rc) {
422 kfree(efx_mtd);
423 return rc;
424 }
425 }
407 426
408 efx_mtd->n_parts = 1; 427 spi = &nic_data->spi_eeprom;
409 efx_mtd->part[0].mtd.type = MTD_NORFLASH; 428 if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
410 efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH; 429 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
411 efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; 430 GFP_KERNEL);
412 efx_mtd->part[0].mtd.erasesize = spi->erase_size; 431 if (!efx_mtd)
413 efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START; 432 return -ENOMEM;
414 efx_mtd->part[0].type_name = "sfc_flash_bootrom"; 433
434 efx_mtd->spi = spi;
435 efx_mtd->name = "EEPROM";
436 efx_mtd->ops = &falcon_mtd_ops;
437
438 efx_mtd->n_parts = 1;
439 efx_mtd->part[0].mtd.type = MTD_RAM;
440 efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
441 efx_mtd->part[0].mtd.size =
442 min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
443 EFX_EEPROM_BOOTCONFIG_START;
444 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
445 efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
446 efx_mtd->part[0].type_name = "sfc_bootconfig";
447
448 rc = efx_mtd_probe_device(efx, efx_mtd);
449 if (rc) {
450 kfree(efx_mtd);
451 return rc;
452 }
453 }
415 454
416 rc = efx_mtd_probe_device(efx, efx_mtd);
417 if (rc)
418 kfree(efx_mtd);
419 return rc; 455 return rc;
420} 456}
421 457
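Sketch (editorial, not part of the patch): with the SPI flash and EEPROM descriptors embedded in falcon_nic_data, "device not fitted" is now encoded as a zero size rather than a NULL pointer on efx_nic, and the probe above simply walks both descriptors and registers an MTD for each one that is present and large enough. A hypothetical helper condensing that presence test, assuming the driver's local declarations:

static bool falcon_has_bootrom_flash(struct efx_nic *efx)	/* hypothetical helper */
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	const struct efx_spi_device *spi = &nic_data->spi_flash;

	/* efx_spi_present() (added in spi.h below) treats size == 0 as "not fitted" */
	return efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START;
}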
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index b137c889152..4c12332434b 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -648,6 +648,7 @@ struct efx_filter_state;
648 * @n_tx_channels: Number of channels used for TX 648 * @n_tx_channels: Number of channels used for TX
649 * @rx_buffer_len: RX buffer length 649 * @rx_buffer_len: RX buffer length
650 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 650 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
651 * @rx_hash_key: Toeplitz hash key for RSS
651 * @rx_indir_table: Indirection table for RSS 652 * @rx_indir_table: Indirection table for RSS
652 * @int_error_count: Number of internal errors seen recently 653 * @int_error_count: Number of internal errors seen recently
653 * @int_error_expire: Time at which error count will be expired 654 * @int_error_expire: Time at which error count will be expired
@@ -658,11 +659,6 @@ struct efx_filter_state;
658 * to verify that an interrupt has occurred. 659 * to verify that an interrupt has occurred.
659 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 660 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
660 * @fatal_irq_level: IRQ level (bit number) used for serious errors 661 * @fatal_irq_level: IRQ level (bit number) used for serious errors
661 * @spi_flash: SPI flash device
662 * This field will be %NULL if no flash device is present (or for Siena).
663 * @spi_eeprom: SPI EEPROM device
664 * This field will be %NULL if no EEPROM device is present (or for Siena).
665 * @spi_lock: SPI bus lock
666 * @mtd_list: List of MTDs attached to the NIC 662 * @mtd_list: List of MTDs attached to the NIC
667 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 663 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
668 * @nic_data: Hardware dependant state 664 * @nic_data: Hardware dependant state
@@ -683,15 +679,12 @@ struct efx_filter_state;
683 * @stats_buffer: DMA buffer for statistics 679 * @stats_buffer: DMA buffer for statistics
684 * @stats_lock: Statistics update lock. Serialises statistics fetches 680 * @stats_lock: Statistics update lock. Serialises statistics fetches
685 * @mac_op: MAC interface 681 * @mac_op: MAC interface
686 * @mac_address: Permanent MAC address
687 * @phy_type: PHY type 682 * @phy_type: PHY type
688 * @mdio_lock: MDIO lock
689 * @phy_op: PHY interface 683 * @phy_op: PHY interface
690 * @phy_data: PHY private data (including PHY-specific stats) 684 * @phy_data: PHY private data (including PHY-specific stats)
691 * @mdio: PHY MDIO interface 685 * @mdio: PHY MDIO interface
692 * @mdio_bus: PHY MDIO bus ID (only used by Siena) 686 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
693 * @phy_mode: PHY operating mode. Serialised by @mac_lock. 687 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
694 * @xmac_poll_required: XMAC link state needs polling
695 * @link_advertising: Autonegotiation advertising flags 688 * @link_advertising: Autonegotiation advertising flags
696 * @link_state: Current state of the link 689 * @link_state: Current state of the link
697 * @n_link_state_changes: Number of times the link has changed state 690 * @n_link_state_changes: Number of times the link has changed state
@@ -748,9 +741,6 @@ struct efx_nic {
748 unsigned irq_zero_count; 741 unsigned irq_zero_count;
749 unsigned fatal_irq_level; 742 unsigned fatal_irq_level;
750 743
751 struct efx_spi_device *spi_flash;
752 struct efx_spi_device *spi_eeprom;
753 struct mutex spi_lock;
754#ifdef CONFIG_SFC_MTD 744#ifdef CONFIG_SFC_MTD
755 struct list_head mtd_list; 745 struct list_head mtd_list;
756#endif 746#endif
@@ -773,17 +763,14 @@ struct efx_nic {
773 spinlock_t stats_lock; 763 spinlock_t stats_lock;
774 764
775 struct efx_mac_operations *mac_op; 765 struct efx_mac_operations *mac_op;
776 unsigned char mac_address[ETH_ALEN];
777 766
778 unsigned int phy_type; 767 unsigned int phy_type;
779 struct mutex mdio_lock;
780 struct efx_phy_operations *phy_op; 768 struct efx_phy_operations *phy_op;
781 void *phy_data; 769 void *phy_data;
782 struct mdio_if_info mdio; 770 struct mdio_if_info mdio;
783 unsigned int mdio_bus; 771 unsigned int mdio_bus;
784 enum efx_phy_mode phy_mode; 772 enum efx_phy_mode phy_mode;
785 773
786 bool xmac_poll_required;
787 u32 link_advertising; 774 u32 link_advertising;
788 struct efx_link_state link_state; 775 struct efx_link_state link_state;
789 unsigned int n_link_state_changes; 776 unsigned int n_link_state_changes;
@@ -831,6 +818,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
831 * be called while the controller is uninitialised. 818 * be called while the controller is uninitialised.
832 * @probe_port: Probe the MAC and PHY 819 * @probe_port: Probe the MAC and PHY
833 * @remove_port: Free resources allocated by probe_port() 820 * @remove_port: Free resources allocated by probe_port()
821 * @handle_global_event: Handle a "global" event (may be %NULL)
834 * @prepare_flush: Prepare the hardware for flushing the DMA queues 822 * @prepare_flush: Prepare the hardware for flushing the DMA queues
835 * @update_stats: Update statistics not provided by event handling 823 * @update_stats: Update statistics not provided by event handling
836 * @start_stats: Start the regular fetching of statistics 824 * @start_stats: Start the regular fetching of statistics
@@ -875,6 +863,7 @@ struct efx_nic_type {
875 int (*reset)(struct efx_nic *efx, enum reset_type method); 863 int (*reset)(struct efx_nic *efx, enum reset_type method);
876 int (*probe_port)(struct efx_nic *efx); 864 int (*probe_port)(struct efx_nic *efx);
877 void (*remove_port)(struct efx_nic *efx); 865 void (*remove_port)(struct efx_nic *efx);
866 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
878 void (*prepare_flush)(struct efx_nic *efx); 867 void (*prepare_flush)(struct efx_nic *efx);
879 void (*update_stats)(struct efx_nic *efx); 868 void (*update_stats)(struct efx_nic *efx);
880 void (*start_stats)(struct efx_nic *efx); 869 void (*start_stats)(struct efx_nic *efx);
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 67cb0c96838..399b12abe2f 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -894,46 +894,6 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
894 channel->channel, EFX_QWORD_VAL(*event)); 894 channel->channel, EFX_QWORD_VAL(*event));
895} 895}
896 896
897/* Global events are basically PHY events */
898static void
899efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
900{
901 struct efx_nic *efx = channel->efx;
902 bool handled = false;
903
904 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
905 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
906 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
907 /* Ignored */
908 handled = true;
909 }
910
911 if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
912 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
913 efx->xmac_poll_required = true;
914 handled = true;
915 }
916
917 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
918 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
919 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
920 netif_err(efx, rx_err, efx->net_dev,
921 "channel %d seen global RX_RESET event. Resetting.\n",
922 channel->channel);
923
924 atomic_inc(&efx->rx_reset);
925 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
926 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
927 handled = true;
928 }
929
930 if (!handled)
931 netif_err(efx, hw, efx->net_dev,
932 "channel %d unknown global event "
933 EFX_QWORD_FMT "\n", channel->channel,
934 EFX_QWORD_VAL(*event));
935}
936
937static void 897static void
938efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 898efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
939{ 899{
@@ -1050,15 +1010,17 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1050 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1010 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1051 efx_handle_generated_event(channel, &event); 1011 efx_handle_generated_event(channel, &event);
1052 break; 1012 break;
1053 case FSE_AZ_EV_CODE_GLOBAL_EV:
1054 efx_handle_global_event(channel, &event);
1055 break;
1056 case FSE_AZ_EV_CODE_DRIVER_EV: 1013 case FSE_AZ_EV_CODE_DRIVER_EV:
1057 efx_handle_driver_event(channel, &event); 1014 efx_handle_driver_event(channel, &event);
1058 break; 1015 break;
1059 case FSE_CZ_EV_CODE_MCDI_EV: 1016 case FSE_CZ_EV_CODE_MCDI_EV:
1060 efx_mcdi_process_event(channel, &event); 1017 efx_mcdi_process_event(channel, &event);
1061 break; 1018 break;
1019 case FSE_AZ_EV_CODE_GLOBAL_EV:
1020 if (efx->type->handle_global_event &&
1021 efx->type->handle_global_event(channel, &event))
1022 break;
1023 /* else fall through */
1062 default: 1024 default:
1063 netif_err(channel->efx, hw, channel->efx->net_dev, 1025 netif_err(channel->efx, hw, channel->efx->net_dev,
1064 "channel %d unknown event type %d (data " 1026 "channel %d unknown event type %d (data "
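Sketch (editorial, not part of the patch): the event loop above no longer hard-codes Falcon's global-event handling; a NIC type that cares about FSE_AZ_EV_CODE_GLOBAL_EV supplies the new handle_global_event hook, and any event the hook does not claim falls through to the default "unknown event" path. The member name comes from the efx_nic_type change above; the handler body and type name here are placeholders:

static bool example_handle_global_event(struct efx_channel *channel,
					efx_qword_t *event)		/* hypothetical */
{
	/* decode *event here; return true only when this NIC type handled it */
	return false;
}

struct efx_nic_type example_nic_type = {				/* hypothetical */
	.handle_global_event	= example_handle_global_event,
	/* ... remaining mandatory hooks omitted from this sketch ... */
};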
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 0438dc98722..eb0586925b5 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -15,6 +15,7 @@
15#include "net_driver.h" 15#include "net_driver.h"
16#include "efx.h" 16#include "efx.h"
17#include "mcdi.h" 17#include "mcdi.h"
18#include "spi.h"
18 19
19/* 20/*
20 * Falcon hardware control 21 * Falcon hardware control
@@ -113,6 +114,11 @@ struct falcon_board {
113 * @stats_pending: Is there a pending DMA of MAC statistics. 114 * @stats_pending: Is there a pending DMA of MAC statistics.
114 * @stats_timer: A timer for regularly fetching MAC statistics. 115 * @stats_timer: A timer for regularly fetching MAC statistics.
115 * @stats_dma_done: Pointer to the flag which indicates DMA completion. 116 * @stats_dma_done: Pointer to the flag which indicates DMA completion.
117 * @spi_flash: SPI flash device
118 * @spi_eeprom: SPI EEPROM device
119 * @spi_lock: SPI bus lock
120 * @mdio_lock: MDIO bus lock
121 * @xmac_poll_required: XMAC link state needs polling
116 */ 122 */
117struct falcon_nic_data { 123struct falcon_nic_data {
118 struct pci_dev *pci_dev2; 124 struct pci_dev *pci_dev2;
@@ -121,6 +127,11 @@ struct falcon_nic_data {
121 bool stats_pending; 127 bool stats_pending;
122 struct timer_list stats_timer; 128 struct timer_list stats_timer;
123 u32 *stats_dma_done; 129 u32 *stats_dma_done;
130 struct efx_spi_device spi_flash;
131 struct efx_spi_device spi_eeprom;
132 struct mutex spi_lock;
133 struct mutex mdio_lock;
134 bool xmac_poll_required;
124}; 135};
125 136
126static inline struct falcon_board *falcon_board(struct efx_nic *efx) 137static inline struct falcon_board *falcon_board(struct efx_nic *efx)
@@ -135,7 +146,6 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
135 * @fw_build: Firmware build number 146 * @fw_build: Firmware build number
136 * @mcdi: Management-Controller-to-Driver Interface 147 * @mcdi: Management-Controller-to-Driver Interface
137 * @wol_filter_id: Wake-on-LAN packet filter id 148 * @wol_filter_id: Wake-on-LAN packet filter id
138 * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
139 */ 149 */
140struct siena_nic_data { 150struct siena_nic_data {
141 u64 fw_version; 151 u64 fw_version;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 68813d1d85f..ea3ae008931 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -41,6 +41,8 @@
41#define PCS_UC_STATUS_LBN 0 41#define PCS_UC_STATUS_LBN 0
42#define PCS_UC_STATUS_WIDTH 8 42#define PCS_UC_STATUS_WIDTH 8
43#define PCS_UC_STATUS_FW_SAVE 0x20 43#define PCS_UC_STATUS_FW_SAVE 0x20
44#define PMA_PMD_MODE_REG 0xc301
45#define PMA_PMD_RXIN_SEL_LBN 6
44#define PMA_PMD_FTX_CTRL2_REG 0xc309 46#define PMA_PMD_FTX_CTRL2_REG 0xc309
45#define PMA_PMD_FTX_STATIC_LBN 13 47#define PMA_PMD_FTX_STATIC_LBN 13
46#define PMA_PMD_VEND1_REG 0xc001 48#define PMA_PMD_VEND1_REG 0xc001
@@ -282,6 +284,10 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
282 * slow) reload of the firmware image (the microcontroller's code 284 * slow) reload of the firmware image (the microcontroller's code
283 * memory is not affected by the microcontroller reset). */ 285 * memory is not affected by the microcontroller reset). */
284 efx_mdio_write(efx, 1, 0xc317, 0x00ff); 286 efx_mdio_write(efx, 1, 0xc317, 0x00ff);
287 /* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
288 * restart doesn't reset it. We need to do that ourselves. */
289 efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
290 1 << PMA_PMD_RXIN_SEL_LBN, false);
285 efx_mdio_write(efx, 1, 0xc300, 0x0002); 291 efx_mdio_write(efx, 1, 0xc300, 0x0002);
286 msleep(20); 292 msleep(20);
287 293
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 45236f58a25..bf845617644 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -194,13 +194,7 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
194 194
195static int siena_probe_nvconfig(struct efx_nic *efx) 195static int siena_probe_nvconfig(struct efx_nic *efx)
196{ 196{
197 int rc; 197 return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
198
199 rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL);
200 if (rc)
201 return rc;
202
203 return 0;
204} 198}
205 199
206static int siena_probe_nic(struct efx_nic *efx) 200static int siena_probe_nic(struct efx_nic *efx)
@@ -562,7 +556,7 @@ static int siena_set_wol(struct efx_nic *efx, u32 type)
562 if (nic_data->wol_filter_id != -1) 556 if (nic_data->wol_filter_id != -1)
563 efx_mcdi_wol_filter_remove(efx, 557 efx_mcdi_wol_filter_remove(efx,
564 nic_data->wol_filter_id); 558 nic_data->wol_filter_id);
565 rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address, 559 rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
566 &nic_data->wol_filter_id); 560 &nic_data->wol_filter_id);
567 if (rc) 561 if (rc)
568 goto fail; 562 goto fail;
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 8bf4fce0813..879b7f6bde3 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -61,6 +61,11 @@ struct efx_spi_device {
61 unsigned int block_size; 61 unsigned int block_size;
62}; 62};
63 63
64static inline bool efx_spi_present(const struct efx_spi_device *spi)
65{
66 return spi->size != 0;
67}
68
64int falcon_spi_cmd(struct efx_nic *efx, 69int falcon_spi_cmd(struct efx_nic *efx,
65 const struct efx_spi_device *spi, unsigned int command, 70 const struct efx_spi_device *spi, unsigned int command,
66 int address, const void* in, void *out, size_t len); 71 int address, const void* in, void *out, size_t len);
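Sketch (editorial, not part of the patch): because the per-NIC data is normally allocated zeroed, an SPI device that probing never fills in keeps size == 0 and efx_spi_present() reports it as absent, so no separate "missing device" flag is needed. A hypothetical caller-side guard, assuming the driver's local declarations:

static int example_eeprom_access(struct efx_nic *efx)		/* hypothetical */
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!efx_spi_present(&nic_data->spi_eeprom))
		return -ENODEV;		/* no EEPROM fitted on this board */
	/* ... issue falcon_spi_cmd() transfers as usual ... */
	return 0;
}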
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 1bc6c48c96e..f102912eba9 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -15,9 +15,7 @@
15#include "mdio_10g.h" 15#include "mdio_10g.h"
16#include "nic.h" 16#include "nic.h"
17#include "phy.h" 17#include "phy.h"
18#include "regs.h"
19#include "workarounds.h" 18#include "workarounds.h"
20#include "selftest.h"
21 19
22/* We expect these MMDs to be in the package. */ 20/* We expect these MMDs to be in the package. */
23#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \ 21#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 11726989fe2..03194f7c095 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -401,6 +401,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
401{ 401{
402 unsigned fill_level; 402 unsigned fill_level;
403 struct efx_nic *efx = tx_queue->efx; 403 struct efx_nic *efx = tx_queue->efx;
404 struct netdev_queue *queue;
404 405
405 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); 406 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
406 407
@@ -417,12 +418,15 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
417 418
418 /* Do this under netif_tx_lock(), to avoid racing 419 /* Do this under netif_tx_lock(), to avoid racing
419 * with efx_xmit(). */ 420 * with efx_xmit(). */
420 netif_tx_lock(efx->net_dev); 421 queue = netdev_get_tx_queue(
422 efx->net_dev,
423 tx_queue->queue / EFX_TXQ_TYPES);
424 __netif_tx_lock(queue, smp_processor_id());
421 if (tx_queue->stopped) { 425 if (tx_queue->stopped) {
422 tx_queue->stopped = 0; 426 tx_queue->stopped = 0;
423 efx_wake_queue(tx_queue->channel); 427 efx_wake_queue(tx_queue->channel);
424 } 428 }
425 netif_tx_unlock(efx->net_dev); 429 __netif_tx_unlock(queue);
426 } 430 }
427 } 431 }
428} 432}
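Sketch (editorial, not part of the patch): the change above narrows the locking from the device-wide netif_tx_lock() to the single netdev_queue backing this hardware TX queue, with tx_queue->queue / EFX_TXQ_TYPES mapping the hardware queue back to its core networking queue index. The same pattern in isolation, assuming the driver's local declarations (helper name hypothetical):

static void example_wake_under_queue_lock(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct netdev_queue *queue =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES);

	__netif_tx_lock(queue, smp_processor_id());
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->channel);
	}
	__netif_tx_unlock(queue);
}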
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 50259dfec58..b12660d7233 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -45,9 +45,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
45 u32 ioaddr = ndev->base_addr; 45 u32 ioaddr = ndev->base_addr;
46 46
47 if (mdp->duplex) /* Full */ 47 if (mdp->duplex) /* Full */
48 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 48 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
49 else /* Half */ 49 else /* Half */
50 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 50 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
51} 51}
52 52
53static void sh_eth_set_rate(struct net_device *ndev) 53static void sh_eth_set_rate(struct net_device *ndev)
@@ -57,10 +57,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
57 57
58 switch (mdp->speed) { 58 switch (mdp->speed) {
59 case 10: /* 10BASE */ 59 case 10: /* 10BASE */
60 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR); 60 writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
61 break; 61 break;
62 case 100:/* 100BASE */ 62 case 100:/* 100BASE */
63 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR); 63 writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
64 break; 64 break;
65 default: 65 default:
66 break; 66 break;
@@ -96,9 +96,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
96 u32 ioaddr = ndev->base_addr; 96 u32 ioaddr = ndev->base_addr;
97 97
98 if (mdp->duplex) /* Full */ 98 if (mdp->duplex) /* Full */
99 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 99 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
100 else /* Half */ 100 else /* Half */
101 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 101 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
102} 102}
103 103
104static void sh_eth_set_rate(struct net_device *ndev) 104static void sh_eth_set_rate(struct net_device *ndev)
@@ -108,10 +108,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
108 108
109 switch (mdp->speed) { 109 switch (mdp->speed) {
110 case 10: /* 10BASE */ 110 case 10: /* 10BASE */
111 ctrl_outl(0, ioaddr + RTRATE); 111 writel(0, ioaddr + RTRATE);
112 break; 112 break;
113 case 100:/* 100BASE */ 113 case 100:/* 100BASE */
114 ctrl_outl(1, ioaddr + RTRATE); 114 writel(1, ioaddr + RTRATE);
115 break; 115 break;
116 default: 116 default:
117 break; 117 break;
@@ -143,7 +143,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
143static void sh_eth_chip_reset(struct net_device *ndev) 143static void sh_eth_chip_reset(struct net_device *ndev)
144{ 144{
145 /* reset device */ 145 /* reset device */
146 ctrl_outl(ARSTR_ARSTR, ARSTR); 146 writel(ARSTR_ARSTR, ARSTR);
147 mdelay(1); 147 mdelay(1);
148} 148}
149 149
@@ -152,10 +152,10 @@ static void sh_eth_reset(struct net_device *ndev)
152 u32 ioaddr = ndev->base_addr; 152 u32 ioaddr = ndev->base_addr;
153 int cnt = 100; 153 int cnt = 100;
154 154
155 ctrl_outl(EDSR_ENALL, ioaddr + EDSR); 155 writel(EDSR_ENALL, ioaddr + EDSR);
156 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); 156 writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
157 while (cnt > 0) { 157 while (cnt > 0) {
158 if (!(ctrl_inl(ioaddr + EDMR) & 0x3)) 158 if (!(readl(ioaddr + EDMR) & 0x3))
159 break; 159 break;
160 mdelay(1); 160 mdelay(1);
161 cnt--; 161 cnt--;
@@ -164,14 +164,14 @@ static void sh_eth_reset(struct net_device *ndev)
164 printk(KERN_ERR "Device reset fail\n"); 164 printk(KERN_ERR "Device reset fail\n");
165 165
166 /* Table Init */ 166 /* Table Init */
167 ctrl_outl(0x0, ioaddr + TDLAR); 167 writel(0x0, ioaddr + TDLAR);
168 ctrl_outl(0x0, ioaddr + TDFAR); 168 writel(0x0, ioaddr + TDFAR);
169 ctrl_outl(0x0, ioaddr + TDFXR); 169 writel(0x0, ioaddr + TDFXR);
170 ctrl_outl(0x0, ioaddr + TDFFR); 170 writel(0x0, ioaddr + TDFFR);
171 ctrl_outl(0x0, ioaddr + RDLAR); 171 writel(0x0, ioaddr + RDLAR);
172 ctrl_outl(0x0, ioaddr + RDFAR); 172 writel(0x0, ioaddr + RDFAR);
173 ctrl_outl(0x0, ioaddr + RDFXR); 173 writel(0x0, ioaddr + RDFXR);
174 ctrl_outl(0x0, ioaddr + RDFFR); 174 writel(0x0, ioaddr + RDFFR);
175} 175}
176 176
177static void sh_eth_set_duplex(struct net_device *ndev) 177static void sh_eth_set_duplex(struct net_device *ndev)
@@ -180,9 +180,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
180 u32 ioaddr = ndev->base_addr; 180 u32 ioaddr = ndev->base_addr;
181 181
182 if (mdp->duplex) /* Full */ 182 if (mdp->duplex) /* Full */
183 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 183 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
184 else /* Half */ 184 else /* Half */
185 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 185 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
186} 186}
187 187
188static void sh_eth_set_rate(struct net_device *ndev) 188static void sh_eth_set_rate(struct net_device *ndev)
@@ -192,13 +192,13 @@ static void sh_eth_set_rate(struct net_device *ndev)
192 192
193 switch (mdp->speed) { 193 switch (mdp->speed) {
194 case 10: /* 10BASE */ 194 case 10: /* 10BASE */
195 ctrl_outl(GECMR_10, ioaddr + GECMR); 195 writel(GECMR_10, ioaddr + GECMR);
196 break; 196 break;
197 case 100:/* 100BASE */ 197 case 100:/* 100BASE */
198 ctrl_outl(GECMR_100, ioaddr + GECMR); 198 writel(GECMR_100, ioaddr + GECMR);
199 break; 199 break;
200 case 1000: /* 1000BASE */ 200 case 1000: /* 1000BASE */
201 ctrl_outl(GECMR_1000, ioaddr + GECMR); 201 writel(GECMR_1000, ioaddr + GECMR);
202 break; 202 break;
203 default: 203 default:
204 break; 204 break;
@@ -283,9 +283,9 @@ static void sh_eth_reset(struct net_device *ndev)
283{ 283{
284 u32 ioaddr = ndev->base_addr; 284 u32 ioaddr = ndev->base_addr;
285 285
286 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); 286 writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
287 mdelay(3); 287 mdelay(3);
288 ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR); 288 writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
289} 289}
290#endif 290#endif
291 291
@@ -336,10 +336,10 @@ static void update_mac_address(struct net_device *ndev)
336{ 336{
337 u32 ioaddr = ndev->base_addr; 337 u32 ioaddr = ndev->base_addr;
338 338
339 ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | 339 writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
340 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), 340 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
341 ioaddr + MAHR); 341 ioaddr + MAHR);
342 ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), 342 writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
343 ioaddr + MALR); 343 ioaddr + MALR);
344} 344}
345 345
@@ -358,12 +358,12 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
358 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { 358 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
359 memcpy(ndev->dev_addr, mac, 6); 359 memcpy(ndev->dev_addr, mac, 6);
360 } else { 360 } else {
361 ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24); 361 ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24);
362 ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF; 362 ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF;
363 ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF; 363 ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF;
364 ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF); 364 ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF);
365 ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF; 365 ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF;
366 ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF); 366 ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF);
367 } 367 }
368} 368}
369 369
@@ -379,19 +379,19 @@ struct bb_info {
379/* PHY bit set */ 379/* PHY bit set */
380static void bb_set(u32 addr, u32 msk) 380static void bb_set(u32 addr, u32 msk)
381{ 381{
382 ctrl_outl(ctrl_inl(addr) | msk, addr); 382 writel(readl(addr) | msk, addr);
383} 383}
384 384
385/* PHY bit clear */ 385/* PHY bit clear */
386static void bb_clr(u32 addr, u32 msk) 386static void bb_clr(u32 addr, u32 msk)
387{ 387{
388 ctrl_outl((ctrl_inl(addr) & ~msk), addr); 388 writel((readl(addr) & ~msk), addr);
389} 389}
390 390
391/* PHY bit read */ 391/* PHY bit read */
392static int bb_read(u32 addr, u32 msk) 392static int bb_read(u32 addr, u32 msk)
393{ 393{
394 return (ctrl_inl(addr) & msk) != 0; 394 return (readl(addr) & msk) != 0;
395} 395}
396 396
397/* Data I/O pin control */ 397/* Data I/O pin control */
@@ -506,9 +506,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
506 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 506 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
507 /* Rx descriptor address set */ 507 /* Rx descriptor address set */
508 if (i == 0) { 508 if (i == 0) {
509 ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR); 509 writel(mdp->rx_desc_dma, ioaddr + RDLAR);
510#if defined(CONFIG_CPU_SUBTYPE_SH7763) 510#if defined(CONFIG_CPU_SUBTYPE_SH7763)
511 ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR); 511 writel(mdp->rx_desc_dma, ioaddr + RDFAR);
512#endif 512#endif
513 } 513 }
514 } 514 }
@@ -528,9 +528,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
528 txdesc->buffer_length = 0; 528 txdesc->buffer_length = 0;
529 if (i == 0) { 529 if (i == 0) {
530 /* Tx descriptor address set */ 530 /* Tx descriptor address set */
531 ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR); 531 writel(mdp->tx_desc_dma, ioaddr + TDLAR);
532#if defined(CONFIG_CPU_SUBTYPE_SH7763) 532#if defined(CONFIG_CPU_SUBTYPE_SH7763)
533 ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR); 533 writel(mdp->tx_desc_dma, ioaddr + TDFAR);
534#endif 534#endif
535 } 535 }
536 } 536 }
@@ -623,71 +623,71 @@ static int sh_eth_dev_init(struct net_device *ndev)
623 /* Descriptor format */ 623 /* Descriptor format */
624 sh_eth_ring_format(ndev); 624 sh_eth_ring_format(ndev);
625 if (mdp->cd->rpadir) 625 if (mdp->cd->rpadir)
626 ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR); 626 writel(mdp->cd->rpadir_value, ioaddr + RPADIR);
627 627
628 /* all sh_eth int mask */ 628 /* all sh_eth int mask */
629 ctrl_outl(0, ioaddr + EESIPR); 629 writel(0, ioaddr + EESIPR);
630 630
631#if defined(__LITTLE_ENDIAN__) 631#if defined(__LITTLE_ENDIAN__)
632 if (mdp->cd->hw_swap) 632 if (mdp->cd->hw_swap)
633 ctrl_outl(EDMR_EL, ioaddr + EDMR); 633 writel(EDMR_EL, ioaddr + EDMR);
634 else 634 else
635#endif 635#endif
636 ctrl_outl(0, ioaddr + EDMR); 636 writel(0, ioaddr + EDMR);
637 637
638 /* FIFO size set */ 638 /* FIFO size set */
639 ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR); 639 writel(mdp->cd->fdr_value, ioaddr + FDR);
640 ctrl_outl(0, ioaddr + TFTR); 640 writel(0, ioaddr + TFTR);
641 641
642 /* Frame recv control */ 642 /* Frame recv control */
643 ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR); 643 writel(mdp->cd->rmcr_value, ioaddr + RMCR);
644 644
645 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 645 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
646 tx_int_var = mdp->tx_int_var = DESC_I_TINT2; 646 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
647 ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER); 647 writel(rx_int_var | tx_int_var, ioaddr + TRSCER);
648 648
649 if (mdp->cd->bculr) 649 if (mdp->cd->bculr)
650 ctrl_outl(0x800, ioaddr + BCULR); /* Burst sycle set */ 650 writel(0x800, ioaddr + BCULR); /* Burst sycle set */
651 651
652 ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR); 652 writel(mdp->cd->fcftr_value, ioaddr + FCFTR);
653 653
654 if (!mdp->cd->no_trimd) 654 if (!mdp->cd->no_trimd)
655 ctrl_outl(0, ioaddr + TRIMD); 655 writel(0, ioaddr + TRIMD);
656 656
657 /* Recv frame limit set register */ 657 /* Recv frame limit set register */
658 ctrl_outl(RFLR_VALUE, ioaddr + RFLR); 658 writel(RFLR_VALUE, ioaddr + RFLR);
659 659
660 ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR); 660 writel(readl(ioaddr + EESR), ioaddr + EESR);
661 ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR); 661 writel(mdp->cd->eesipr_value, ioaddr + EESIPR);
662 662
663 /* PAUSE Prohibition */ 663 /* PAUSE Prohibition */
664 val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) | 664 val = (readl(ioaddr + ECMR) & ECMR_DM) |
665 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 665 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
666 666
667 ctrl_outl(val, ioaddr + ECMR); 667 writel(val, ioaddr + ECMR);
668 668
669 if (mdp->cd->set_rate) 669 if (mdp->cd->set_rate)
670 mdp->cd->set_rate(ndev); 670 mdp->cd->set_rate(ndev);
671 671
672 /* E-MAC Status Register clear */ 672 /* E-MAC Status Register clear */
673 ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR); 673 writel(mdp->cd->ecsr_value, ioaddr + ECSR);
674 674
675 /* E-MAC Interrupt Enable register */ 675 /* E-MAC Interrupt Enable register */
676 ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR); 676 writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
677 677
678 /* Set MAC address */ 678 /* Set MAC address */
679 update_mac_address(ndev); 679 update_mac_address(ndev);
680 680
681 /* mask reset */ 681 /* mask reset */
682 if (mdp->cd->apr) 682 if (mdp->cd->apr)
683 ctrl_outl(APR_AP, ioaddr + APR); 683 writel(APR_AP, ioaddr + APR);
684 if (mdp->cd->mpr) 684 if (mdp->cd->mpr)
685 ctrl_outl(MPR_MP, ioaddr + MPR); 685 writel(MPR_MP, ioaddr + MPR);
686 if (mdp->cd->tpauser) 686 if (mdp->cd->tpauser)
687 ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER); 687 writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
688 688
689 /* Setting the Rx mode will start the Rx process. */ 689 /* Setting the Rx mode will start the Rx process. */
690 ctrl_outl(EDRRR_R, ioaddr + EDRRR); 690 writel(EDRRR_R, ioaddr + EDRRR);
691 691
692 netif_start_queue(ndev); 692 netif_start_queue(ndev);
693 693
@@ -811,8 +811,8 @@ static int sh_eth_rx(struct net_device *ndev)
811 811
812 /* Restart Rx engine if stopped. */ 812 /* Restart Rx engine if stopped. */
813 /* If we don't need to check status, don't. -KDU */ 813 /* If we don't need to check status, don't. -KDU */
814 if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R)) 814 if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R))
815 ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR); 815 writel(EDRRR_R, ndev->base_addr + EDRRR);
816 816
817 return 0; 817 return 0;
818} 818}
@@ -827,8 +827,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
827 u32 mask; 827 u32 mask;
828 828
829 if (intr_status & EESR_ECI) { 829 if (intr_status & EESR_ECI) {
830 felic_stat = ctrl_inl(ioaddr + ECSR); 830 felic_stat = readl(ioaddr + ECSR);
831 ctrl_outl(felic_stat, ioaddr + ECSR); /* clear int */ 831 writel(felic_stat, ioaddr + ECSR); /* clear int */
832 if (felic_stat & ECSR_ICD) 832 if (felic_stat & ECSR_ICD)
833 mdp->stats.tx_carrier_errors++; 833 mdp->stats.tx_carrier_errors++;
834 if (felic_stat & ECSR_LCHNG) { 834 if (felic_stat & ECSR_LCHNG) {
@@ -839,25 +839,25 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
839 else 839 else
840 link_stat = PHY_ST_LINK; 840 link_stat = PHY_ST_LINK;
841 } else { 841 } else {
842 link_stat = (ctrl_inl(ioaddr + PSR)); 842 link_stat = (readl(ioaddr + PSR));
843 if (mdp->ether_link_active_low) 843 if (mdp->ether_link_active_low)
844 link_stat = ~link_stat; 844 link_stat = ~link_stat;
845 } 845 }
846 if (!(link_stat & PHY_ST_LINK)) { 846 if (!(link_stat & PHY_ST_LINK)) {
847 /* Link Down : disable tx and rx */ 847 /* Link Down : disable tx and rx */
848 ctrl_outl(ctrl_inl(ioaddr + ECMR) & 848 writel(readl(ioaddr + ECMR) &
849 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR); 849 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
850 } else { 850 } else {
851 /* Link Up */ 851 /* Link Up */
852 ctrl_outl(ctrl_inl(ioaddr + EESIPR) & 852 writel(readl(ioaddr + EESIPR) &
853 ~DMAC_M_ECI, ioaddr + EESIPR); 853 ~DMAC_M_ECI, ioaddr + EESIPR);
854 /*clear int */ 854 /*clear int */
855 ctrl_outl(ctrl_inl(ioaddr + ECSR), 855 writel(readl(ioaddr + ECSR),
856 ioaddr + ECSR); 856 ioaddr + ECSR);
857 ctrl_outl(ctrl_inl(ioaddr + EESIPR) | 857 writel(readl(ioaddr + EESIPR) |
858 DMAC_M_ECI, ioaddr + EESIPR); 858 DMAC_M_ECI, ioaddr + EESIPR);
859 /* enable tx and rx */ 859 /* enable tx and rx */
860 ctrl_outl(ctrl_inl(ioaddr + ECMR) | 860 writel(readl(ioaddr + ECMR) |
861 (ECMR_RE | ECMR_TE), ioaddr + ECMR); 861 (ECMR_RE | ECMR_TE), ioaddr + ECMR);
862 } 862 }
863 } 863 }
@@ -888,8 +888,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
888 /* Receive Descriptor Empty int */ 888 /* Receive Descriptor Empty int */
889 mdp->stats.rx_over_errors++; 889 mdp->stats.rx_over_errors++;
890 890
891 if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R) 891 if (readl(ioaddr + EDRRR) ^ EDRRR_R)
892 ctrl_outl(EDRRR_R, ioaddr + EDRRR); 892 writel(EDRRR_R, ioaddr + EDRRR);
893 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 893 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
894 } 894 }
895 if (intr_status & EESR_RFE) { 895 if (intr_status & EESR_RFE) {
@@ -903,7 +903,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
903 mask &= ~EESR_ADE; 903 mask &= ~EESR_ADE;
904 if (intr_status & mask) { 904 if (intr_status & mask) {
905 /* Tx error */ 905 /* Tx error */
906 u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR); 906 u32 edtrr = readl(ndev->base_addr + EDTRR);
907 /* dmesg */ 907 /* dmesg */
908 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ", 908 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
909 intr_status, mdp->cur_tx); 909 intr_status, mdp->cur_tx);
@@ -915,7 +915,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
915 /* SH7712 BUG */ 915 /* SH7712 BUG */
916 if (edtrr ^ EDTRR_TRNS) { 916 if (edtrr ^ EDTRR_TRNS) {
917 /* tx dma start */ 917 /* tx dma start */
918 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR); 918 writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
919 } 919 }
920 /* wakeup */ 920 /* wakeup */
921 netif_wake_queue(ndev); 921 netif_wake_queue(ndev);
@@ -934,12 +934,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
934 spin_lock(&mdp->lock); 934 spin_lock(&mdp->lock);
935 935
936 /* Get interrpt stat */ 936 /* Get interrpt stat */
937 intr_status = ctrl_inl(ioaddr + EESR); 937 intr_status = readl(ioaddr + EESR);
938 /* Clear interrupt */ 938 /* Clear interrupt */
939 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 939 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
940 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | 940 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
941 cd->tx_check | cd->eesr_err_check)) { 941 cd->tx_check | cd->eesr_err_check)) {
942 ctrl_outl(intr_status, ioaddr + EESR); 942 writel(intr_status, ioaddr + EESR);
943 ret = IRQ_HANDLED; 943 ret = IRQ_HANDLED;
944 } else 944 } else
945 goto other_irq; 945 goto other_irq;
@@ -1000,7 +1000,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1000 mdp->cd->set_rate(ndev); 1000 mdp->cd->set_rate(ndev);
1001 } 1001 }
1002 if (mdp->link == PHY_DOWN) { 1002 if (mdp->link == PHY_DOWN) {
1003 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF) 1003 writel((readl(ioaddr + ECMR) & ~ECMR_TXF)
1004 | ECMR_DM, ioaddr + ECMR); 1004 | ECMR_DM, ioaddr + ECMR);
1005 new_state = 1; 1005 new_state = 1;
1006 mdp->link = phydev->link; 1006 mdp->link = phydev->link;
@@ -1125,7 +1125,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
1125 1125
1126 /* worning message out. */ 1126 /* worning message out. */
1127 printk(KERN_WARNING "%s: transmit timed out, status %8.8x," 1127 printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
1128 " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR)); 1128 " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
1129 1129
1130 /* tx_errors count up */ 1130 /* tx_errors count up */
1131 mdp->stats.tx_errors++; 1131 mdp->stats.tx_errors++;
@@ -1196,8 +1196,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1196 1196
1197 mdp->cur_tx++; 1197 mdp->cur_tx++;
1198 1198
1199 if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS)) 1199 if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
1200 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR); 1200 writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
1201 1201
1202 return NETDEV_TX_OK; 1202 return NETDEV_TX_OK;
1203} 1203}
@@ -1212,11 +1212,11 @@ static int sh_eth_close(struct net_device *ndev)
1212 netif_stop_queue(ndev); 1212 netif_stop_queue(ndev);
1213 1213
1214 /* Disable interrupts by clearing the interrupt mask. */ 1214 /* Disable interrupts by clearing the interrupt mask. */
1215 ctrl_outl(0x0000, ioaddr + EESIPR); 1215 writel(0x0000, ioaddr + EESIPR);
1216 1216
1217 /* Stop the chip's Tx and Rx processes. */ 1217 /* Stop the chip's Tx and Rx processes. */
1218 ctrl_outl(0, ioaddr + EDTRR); 1218 writel(0, ioaddr + EDTRR);
1219 ctrl_outl(0, ioaddr + EDRRR); 1219 writel(0, ioaddr + EDRRR);
1220 1220
1221 /* PHY Disconnect */ 1221 /* PHY Disconnect */
1222 if (mdp->phydev) { 1222 if (mdp->phydev) {
@@ -1251,20 +1251,20 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1251 1251
1252 pm_runtime_get_sync(&mdp->pdev->dev); 1252 pm_runtime_get_sync(&mdp->pdev->dev);
1253 1253
1254 mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR); 1254 mdp->stats.tx_dropped += readl(ioaddr + TROCR);
1255 ctrl_outl(0, ioaddr + TROCR); /* (write clear) */ 1255 writel(0, ioaddr + TROCR); /* (write clear) */
1256 mdp->stats.collisions += ctrl_inl(ioaddr + CDCR); 1256 mdp->stats.collisions += readl(ioaddr + CDCR);
1257 ctrl_outl(0, ioaddr + CDCR); /* (write clear) */ 1257 writel(0, ioaddr + CDCR); /* (write clear) */
1258 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR); 1258 mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR);
1259 ctrl_outl(0, ioaddr + LCCR); /* (write clear) */ 1259 writel(0, ioaddr + LCCR); /* (write clear) */
1260#if defined(CONFIG_CPU_SUBTYPE_SH7763) 1260#if defined(CONFIG_CPU_SUBTYPE_SH7763)
1261 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */ 1261 mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */
1262 ctrl_outl(0, ioaddr + CERCR); /* (write clear) */ 1262 writel(0, ioaddr + CERCR); /* (write clear) */
1263 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */ 1263 mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */
1264 ctrl_outl(0, ioaddr + CEECR); /* (write clear) */ 1264 writel(0, ioaddr + CEECR); /* (write clear) */
1265#else 1265#else
1266 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR); 1266 mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR);
1267 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */ 1267 writel(0, ioaddr + CNDCR); /* (write clear) */
1268#endif 1268#endif
1269 pm_runtime_put_sync(&mdp->pdev->dev); 1269 pm_runtime_put_sync(&mdp->pdev->dev);
1270 1270
@@ -1295,11 +1295,11 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
1295 1295
1296 if (ndev->flags & IFF_PROMISC) { 1296 if (ndev->flags & IFF_PROMISC) {
1297 /* Set promiscuous. */ 1297 /* Set promiscuous. */
1298 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM, 1298 writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
1299 ioaddr + ECMR); 1299 ioaddr + ECMR);
1300 } else { 1300 } else {
1301 /* Normal, unicast/broadcast-only mode. */ 1301 /* Normal, unicast/broadcast-only mode. */
1302 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT, 1302 writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
1303 ioaddr + ECMR); 1303 ioaddr + ECMR);
1304 } 1304 }
1305} 1305}
@@ -1307,30 +1307,30 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
1307/* SuperH's TSU register init function */ 1307/* SuperH's TSU register init function */
1308static void sh_eth_tsu_init(u32 ioaddr) 1308static void sh_eth_tsu_init(u32 ioaddr)
1309{ 1309{
1310 ctrl_outl(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */ 1310 writel(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */
1311 ctrl_outl(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */ 1311 writel(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */
1312 ctrl_outl(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */ 1312 writel(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
1313 ctrl_outl(0xc, ioaddr + TSU_BSYSL0); 1313 writel(0xc, ioaddr + TSU_BSYSL0);
1314 ctrl_outl(0xc, ioaddr + TSU_BSYSL1); 1314 writel(0xc, ioaddr + TSU_BSYSL1);
1315 ctrl_outl(0, ioaddr + TSU_PRISL0); 1315 writel(0, ioaddr + TSU_PRISL0);
1316 ctrl_outl(0, ioaddr + TSU_PRISL1); 1316 writel(0, ioaddr + TSU_PRISL1);
1317 ctrl_outl(0, ioaddr + TSU_FWSL0); 1317 writel(0, ioaddr + TSU_FWSL0);
1318 ctrl_outl(0, ioaddr + TSU_FWSL1); 1318 writel(0, ioaddr + TSU_FWSL1);
1319 ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC); 1319 writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
1320#if defined(CONFIG_CPU_SUBTYPE_SH7763) 1320#if defined(CONFIG_CPU_SUBTYPE_SH7763)
1321 ctrl_outl(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */ 1321 writel(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
1322 ctrl_outl(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */ 1322 writel(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
1323#else 1323#else
1324 ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */ 1324 writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
1325 ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */ 1325 writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
1326#endif 1326#endif
1327 ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */ 1327 writel(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
1328 ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */ 1328 writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
1329 ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */ 1329 writel(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
1330 ctrl_outl(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */ 1330 writel(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */
1331 ctrl_outl(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */ 1331 writel(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */
1332 ctrl_outl(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */ 1332 writel(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
1333 ctrl_outl(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */ 1333 writel(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
1334} 1334}
1335#endif /* SH_ETH_HAS_TSU */ 1335#endif /* SH_ETH_HAS_TSU */
1336 1336
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 79bdc2e1322..5f06c4706ab 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,7 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define DRV_MODULE_VERSION "Apr_2010" 23#define DRV_MODULE_VERSION "Nov_2010"
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/stmmac.h> 25#include <linux/stmmac.h>
26 26
@@ -37,7 +37,6 @@ struct stmmac_priv {
37 unsigned int cur_tx; 37 unsigned int cur_tx;
38 unsigned int dirty_tx; 38 unsigned int dirty_tx;
39 unsigned int dma_tx_size; 39 unsigned int dma_tx_size;
40 int tx_coe;
41 int tx_coalesce; 40 int tx_coalesce;
42 41
43 struct dma_desc *dma_rx ; 42 struct dma_desc *dma_rx ;
@@ -48,7 +47,6 @@ struct stmmac_priv {
48 struct sk_buff_head rx_recycle; 47 struct sk_buff_head rx_recycle;
49 48
50 struct net_device *dev; 49 struct net_device *dev;
51 int is_gmac;
52 dma_addr_t dma_rx_phy; 50 dma_addr_t dma_rx_phy;
53 unsigned int dma_rx_size; 51 unsigned int dma_rx_size;
54 unsigned int dma_buf_sz; 52 unsigned int dma_buf_sz;
@@ -60,14 +58,11 @@ struct stmmac_priv {
60 struct napi_struct napi; 58 struct napi_struct napi;
61 59
62 phy_interface_t phy_interface; 60 phy_interface_t phy_interface;
63 int pbl;
64 int bus_id;
65 int phy_addr; 61 int phy_addr;
66 int phy_mask; 62 int phy_mask;
67 int (*phy_reset) (void *priv); 63 int (*phy_reset) (void *priv);
68 void (*fix_mac_speed) (void *priv, unsigned int speed); 64 int rx_coe;
69 void (*bus_setup)(void __iomem *ioaddr); 65 int no_csum_insertion;
70 void *bsp_priv;
71 66
72 int phy_irq; 67 int phy_irq;
73 struct phy_device *phydev; 68 struct phy_device *phydev;
@@ -77,47 +72,20 @@ struct stmmac_priv {
77 unsigned int flow_ctrl; 72 unsigned int flow_ctrl;
78 unsigned int pause; 73 unsigned int pause;
79 struct mii_bus *mii; 74 struct mii_bus *mii;
80 int mii_clk_csr;
81 75
82 u32 msg_enable; 76 u32 msg_enable;
83 spinlock_t lock; 77 spinlock_t lock;
84 int wolopts; 78 int wolopts;
85 int wolenabled; 79 int wolenabled;
86 int shutdown;
87#ifdef CONFIG_STMMAC_TIMER 80#ifdef CONFIG_STMMAC_TIMER
88 struct stmmac_timer *tm; 81 struct stmmac_timer *tm;
89#endif 82#endif
90#ifdef STMMAC_VLAN_TAG_USED 83#ifdef STMMAC_VLAN_TAG_USED
91 struct vlan_group *vlgrp; 84 struct vlan_group *vlgrp;
92#endif 85#endif
93 int enh_desc; 86 struct plat_stmmacenet_data *plat;
94 int rx_coe;
95 int bugged_jumbo;
96 int no_csum_insertion;
97}; 87};
98 88
99#ifdef CONFIG_STM_DRIVERS
100#include <linux/stm/pad.h>
101static inline int stmmac_claim_resource(struct platform_device *pdev)
102{
103 int ret = 0;
104 struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
105
106 /* Pad routing setup */
107 if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
108 dev_name(&pdev->dev)))) {
109 printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
110 ret = -ENODEV;
111 }
112 return ret;
113}
114#else
115static inline int stmmac_claim_resource(struct platform_device *pdev)
116{
117 return 0;
118}
119#endif
120
121extern int stmmac_mdio_unregister(struct net_device *ndev); 89extern int stmmac_mdio_unregister(struct net_device *ndev);
122extern int stmmac_mdio_register(struct net_device *ndev); 90extern int stmmac_mdio_register(struct net_device *ndev);
123extern void stmmac_set_ethtool_ops(struct net_device *netdev); 91extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 6d65482e789..fd719edc7f7 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -94,7 +94,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
94{ 94{
95 struct stmmac_priv *priv = netdev_priv(dev); 95 struct stmmac_priv *priv = netdev_priv(dev);
96 96
97 if (!priv->is_gmac) 97 if (!priv->plat->has_gmac)
98 strcpy(info->driver, MAC100_ETHTOOL_NAME); 98 strcpy(info->driver, MAC100_ETHTOOL_NAME);
99 else 99 else
100 strcpy(info->driver, GMAC_ETHTOOL_NAME); 100 strcpy(info->driver, GMAC_ETHTOOL_NAME);
@@ -176,7 +176,7 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
176 176
177 memset(reg_space, 0x0, REG_SPACE_SIZE); 177 memset(reg_space, 0x0, REG_SPACE_SIZE);
178 178
179 if (!priv->is_gmac) { 179 if (!priv->plat->has_gmac) {
180 /* MAC registers */ 180 /* MAC registers */
181 for (i = 0; i < 12; i++) 181 for (i = 0; i < 12; i++)
182 reg_space[i] = readl(priv->ioaddr + (i * 4)); 182 reg_space[i] = readl(priv->ioaddr + (i * 4));
@@ -197,16 +197,6 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
197 } 197 }
198} 198}
199 199
200static int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
201{
202 if (data)
203 netdev->features |= NETIF_F_HW_CSUM;
204 else
205 netdev->features &= ~NETIF_F_HW_CSUM;
206
207 return 0;
208}
209
210static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev) 200static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
211{ 201{
212 struct stmmac_priv *priv = netdev_priv(dev); 202 struct stmmac_priv *priv = netdev_priv(dev);
@@ -370,7 +360,7 @@ static struct ethtool_ops stmmac_ethtool_ops = {
370 .get_link = ethtool_op_get_link, 360 .get_link = ethtool_op_get_link,
371 .get_rx_csum = stmmac_ethtool_get_rx_csum, 361 .get_rx_csum = stmmac_ethtool_get_rx_csum,
372 .get_tx_csum = ethtool_op_get_tx_csum, 362 .get_tx_csum = ethtool_op_get_tx_csum,
373 .set_tx_csum = stmmac_ethtool_set_tx_csum, 363 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
374 .get_sg = ethtool_op_get_sg, 364 .get_sg = ethtool_op_get_sg,
375 .set_sg = ethtool_op_set_sg, 365 .set_sg = ethtool_op_set_sg,
376 .get_pauseparam = stmmac_get_pauseparam, 366 .get_pauseparam = stmmac_get_pauseparam,
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 2114837809e..c0dc78571c6 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -186,6 +186,18 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
186 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; 186 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
187} 187}
188 188
189/* On some ST platforms, some HW system configuraton registers have to be
190 * set according to the link speed negotiated.
191 */
192static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
193{
194 struct phy_device *phydev = priv->phydev;
195
196 if (likely(priv->plat->fix_mac_speed))
197 priv->plat->fix_mac_speed(priv->plat->bsp_priv,
198 phydev->speed);
199}
200
189/** 201/**
190 * stmmac_adjust_link 202 * stmmac_adjust_link
191 * @dev: net device structure 203 * @dev: net device structure
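Sketch (editorial, not part of the patch): fix_mac_speed and its bsp_priv cookie now come from struct plat_stmmacenet_data rather than from stmmac_priv, so a board file supplies them once in its platform data and the helper above simply dereferences priv->plat. Apart from fix_mac_speed, bsp_priv and bus_id, the names below are hypothetical:

static void my_board_fix_mac_speed(void *bsp_priv, unsigned int speed)
{
	/* program the SoC glue logic for the negotiated link speed */
}

static struct plat_stmmacenet_data my_board_stmmac_data = {
	.bus_id		= 0,
	.fix_mac_speed	= my_board_fix_mac_speed,
	.bsp_priv	= NULL,		/* handed back as the first argument */
};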
@@ -228,15 +240,13 @@ static void stmmac_adjust_link(struct net_device *dev)
228 new_state = 1; 240 new_state = 1;
229 switch (phydev->speed) { 241 switch (phydev->speed) {
230 case 1000: 242 case 1000:
231 if (likely(priv->is_gmac)) 243 if (likely(priv->plat->has_gmac))
232 ctrl &= ~priv->hw->link.port; 244 ctrl &= ~priv->hw->link.port;
233 if (likely(priv->fix_mac_speed)) 245 stmmac_hw_fix_mac_speed(priv);
234 priv->fix_mac_speed(priv->bsp_priv,
235 phydev->speed);
236 break; 246 break;
237 case 100: 247 case 100:
238 case 10: 248 case 10:
239 if (priv->is_gmac) { 249 if (priv->plat->has_gmac) {
240 ctrl |= priv->hw->link.port; 250 ctrl |= priv->hw->link.port;
241 if (phydev->speed == SPEED_100) { 251 if (phydev->speed == SPEED_100) {
242 ctrl |= priv->hw->link.speed; 252 ctrl |= priv->hw->link.speed;
@@ -246,9 +256,7 @@ static void stmmac_adjust_link(struct net_device *dev)
246 } else { 256 } else {
247 ctrl &= ~priv->hw->link.port; 257 ctrl &= ~priv->hw->link.port;
248 } 258 }
249 if (likely(priv->fix_mac_speed)) 259 stmmac_hw_fix_mac_speed(priv);
250 priv->fix_mac_speed(priv->bsp_priv,
251 phydev->speed);
252 break; 260 break;
253 default: 261 default:
254 if (netif_msg_link(priv)) 262 if (netif_msg_link(priv))
@@ -305,7 +313,7 @@ static int stmmac_init_phy(struct net_device *dev)
305 return 0; 313 return 0;
306 } 314 }
307 315
308 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id); 316 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
309 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 317 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
310 priv->phy_addr); 318 priv->phy_addr);
311 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 319 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -552,7 +560,7 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
552 */ 560 */
553static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 561static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
554{ 562{
555 if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) { 563 if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
556 /* In case of GMAC, SF mode has to be enabled 564 /* In case of GMAC, SF mode has to be enabled
557 * to perform the TX COE. This depends on: 565 * to perform the TX COE. This depends on:
558 * 1) TX COE if actually supported 566 * 1) TX COE if actually supported
@@ -814,7 +822,7 @@ static int stmmac_open(struct net_device *dev)
814 init_dma_desc_rings(dev); 822 init_dma_desc_rings(dev);
815 823
816 /* DMA initialization and SW reset */ 824 /* DMA initialization and SW reset */
817 if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl, 825 if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
818 priv->dma_tx_phy, 826 priv->dma_tx_phy,
819 priv->dma_rx_phy) < 0)) { 827 priv->dma_rx_phy) < 0)) {
820 828
@@ -825,19 +833,17 @@ static int stmmac_open(struct net_device *dev)
825 /* Copy the MAC addr into the HW */ 833 /* Copy the MAC addr into the HW */
826 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); 834 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
827 /* If required, perform hw setup of the bus. */ 835 /* If required, perform hw setup of the bus. */
828 if (priv->bus_setup) 836 if (priv->plat->bus_setup)
829 priv->bus_setup(priv->ioaddr); 837 priv->plat->bus_setup(priv->ioaddr);
830 /* Initialize the MAC Core */ 838 /* Initialize the MAC Core */
831 priv->hw->mac->core_init(priv->ioaddr); 839 priv->hw->mac->core_init(priv->ioaddr);
832 840
833 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); 841 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
834 if (priv->rx_coe) 842 if (priv->rx_coe)
835 pr_info("stmmac: Rx Checksum Offload Engine supported\n"); 843 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
836 if (priv->tx_coe) 844 if (priv->plat->tx_coe)
837 pr_info("\tTX Checksum insertion supported\n"); 845 pr_info("\tTX Checksum insertion supported\n");
838 846
839 priv->shutdown = 0;
840
841 /* Initialise the MMC (if present) to disable all interrupts. */ 847 /* Initialise the MMC (if present) to disable all interrupts. */
842 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); 848 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
843 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK); 849 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
@@ -1042,7 +1048,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1042 return stmmac_sw_tso(priv, skb); 1048 return stmmac_sw_tso(priv, skb);
1043 1049
1044 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) { 1050 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
1045 if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion))) 1051 if (unlikely((!priv->plat->tx_coe) ||
1052 (priv->no_csum_insertion)))
1046 skb_checksum_help(skb); 1053 skb_checksum_help(skb);
1047 else 1054 else
1048 csum_insertion = 1; 1055 csum_insertion = 1;
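Sketch (editorial, not part of the patch): the decision above, condensed — checksum insertion is requested from the hardware only when the platform advertises TX COE and insertion has not been disabled for oversized frames; otherwise the checksum is computed in software before the frame is queued. Assuming the driver's local declarations, with a hypothetical helper name:

static bool example_use_hw_tx_csum(struct stmmac_priv *priv, struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;				/* nothing to insert */
	if (!priv->plat->tx_coe || priv->no_csum_insertion) {
		skb_checksum_help(skb);			/* software fallback */
		return false;
	}
	return true;					/* descriptor carries the csum request */
}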
@@ -1146,7 +1153,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1146 DMA_FROM_DEVICE); 1153 DMA_FROM_DEVICE);
1147 1154
1148 (p + entry)->des2 = priv->rx_skbuff_dma[entry]; 1155 (p + entry)->des2 = priv->rx_skbuff_dma[entry];
1149 if (unlikely(priv->is_gmac)) { 1156 if (unlikely(priv->plat->has_gmac)) {
1150 if (bfsize >= BUF_SIZE_8KiB) 1157 if (bfsize >= BUF_SIZE_8KiB)
1151 (p + entry)->des3 = 1158 (p + entry)->des3 =
1152 (p + entry)->des2 + BUF_SIZE_8KiB; 1159 (p + entry)->des2 + BUF_SIZE_8KiB;
@@ -1356,7 +1363,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1356 return -EBUSY; 1363 return -EBUSY;
1357 } 1364 }
1358 1365
1359 if (priv->is_gmac) 1366 if (priv->plat->has_gmac)
1360 max_mtu = JUMBO_LEN; 1367 max_mtu = JUMBO_LEN;
1361 else 1368 else
1362 max_mtu = ETH_DATA_LEN; 1369 max_mtu = ETH_DATA_LEN;
@@ -1370,7 +1377,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1370 * needs to have the Tx COE disabled for oversized frames 1377 * needs to have the Tx COE disabled for oversized frames
1371 * (due to limited buffer sizes). In this case we disable 1378 * (due to limited buffer sizes). In this case we disable
1372 * the TX csum insertion in the TDES and not use SF. */ 1379 * the TX csum insertion in the TDES and not use SF. */
1373 if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN)) 1380 if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
1374 priv->no_csum_insertion = 1; 1381 priv->no_csum_insertion = 1;
1375 else 1382 else
1376 priv->no_csum_insertion = 0; 1383 priv->no_csum_insertion = 0;
@@ -1390,7 +1397,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1390 return IRQ_NONE; 1397 return IRQ_NONE;
1391 } 1398 }
1392 1399
1393 if (priv->is_gmac) 1400 if (priv->plat->has_gmac)
1394 /* To handle GMAC own interrupts */ 1401 /* To handle GMAC own interrupts */
1395 priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr); 1402 priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
1396 1403
@@ -1487,7 +1494,8 @@ static int stmmac_probe(struct net_device *dev)
1487 dev->netdev_ops = &stmmac_netdev_ops; 1494 dev->netdev_ops = &stmmac_netdev_ops;
1488 stmmac_set_ethtool_ops(dev); 1495 stmmac_set_ethtool_ops(dev);
1489 1496
1490 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA); 1497 dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
1498 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1491 dev->watchdog_timeo = msecs_to_jiffies(watchdog); 1499 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1492#ifdef STMMAC_VLAN_TAG_USED 1500#ifdef STMMAC_VLAN_TAG_USED
1493 /* Both mac100 and gmac support receive VLAN tag detection */ 1501 /* Both mac100 and gmac support receive VLAN tag detection */
@@ -1520,7 +1528,7 @@ static int stmmac_probe(struct net_device *dev)
1520 1528
1521 DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n", 1529 DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
1522 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", 1530 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
1523 (dev->features & NETIF_F_HW_CSUM) ? "on" : "off"); 1531 (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
1524 1532
1525 return ret; 1533 return ret;
1526} 1534}
@@ -1536,7 +1544,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1536 1544
1537 struct mac_device_info *device; 1545 struct mac_device_info *device;
1538 1546
1539 if (priv->is_gmac) 1547 if (priv->plat->has_gmac)
1540 device = dwmac1000_setup(priv->ioaddr); 1548 device = dwmac1000_setup(priv->ioaddr);
1541 else 1549 else
1542 device = dwmac100_setup(priv->ioaddr); 1550 device = dwmac100_setup(priv->ioaddr);
@@ -1544,7 +1552,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1544 if (!device) 1552 if (!device)
1545 return -ENOMEM; 1553 return -ENOMEM;
1546 1554
1547 if (priv->enh_desc) { 1555 if (priv->plat->enh_desc) {
1548 device->desc = &enh_desc_ops; 1556 device->desc = &enh_desc_ops;
1549 pr_info("\tEnhanced descriptor structure\n"); 1557 pr_info("\tEnhanced descriptor structure\n");
1550 } else 1558 } else
@@ -1598,7 +1606,7 @@ static int stmmac_associate_phy(struct device *dev, void *data)
1598 plat_dat->bus_id); 1606 plat_dat->bus_id);
1599 1607
1600 /* Check that this phy is for the MAC being initialised */ 1608 /* Check that this phy is for the MAC being initialised */
1601 if (priv->bus_id != plat_dat->bus_id) 1609 if (priv->plat->bus_id != plat_dat->bus_id)
1602 return 0; 1610 return 0;
1603 1611
1604 /* OK, this PHY is connected to the MAC. 1612 /* OK, this PHY is connected to the MAC.
@@ -1634,7 +1642,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1634 struct resource *res; 1642 struct resource *res;
1635 void __iomem *addr = NULL; 1643 void __iomem *addr = NULL;
1636 struct net_device *ndev = NULL; 1644 struct net_device *ndev = NULL;
1637 struct stmmac_priv *priv; 1645 struct stmmac_priv *priv = NULL;
1638 struct plat_stmmacenet_data *plat_dat; 1646 struct plat_stmmacenet_data *plat_dat;
1639 1647
1640 pr_info("STMMAC driver:\n\tplatform registration... "); 1648 pr_info("STMMAC driver:\n\tplatform registration... ");
@@ -1683,13 +1691,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1683 priv->device = &(pdev->dev); 1691 priv->device = &(pdev->dev);
1684 priv->dev = ndev; 1692 priv->dev = ndev;
1685 plat_dat = pdev->dev.platform_data; 1693 plat_dat = pdev->dev.platform_data;
1686 priv->bus_id = plat_dat->bus_id; 1694
1687 priv->pbl = plat_dat->pbl; /* TLI */ 1695 priv->plat = plat_dat;
1688 priv->mii_clk_csr = plat_dat->clk_csr; 1696
1689 priv->tx_coe = plat_dat->tx_coe;
1690 priv->bugged_jumbo = plat_dat->bugged_jumbo;
1691 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
1692 priv->enh_desc = plat_dat->enh_desc;
1693 priv->ioaddr = addr; 1697 priv->ioaddr = addr;
1694 1698
1695 /* PMT module is not integrated in all the MAC devices. */ 1699 /* PMT module is not integrated in all the MAC devices. */
@@ -1703,10 +1707,12 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1703 /* Set the I/O base addr */ 1707 /* Set the I/O base addr */
1704 ndev->base_addr = (unsigned long)addr; 1708 ndev->base_addr = (unsigned long)addr;
1705 1709
1706 /* Verify embedded resource for the platform */ 1710 /* Custom initialisation */
1707 ret = stmmac_claim_resource(pdev); 1711 if (priv->plat->init) {
1708 if (ret < 0) 1712 ret = priv->plat->init(pdev);
1709 goto out; 1713 if (unlikely(ret))
1714 goto out;
1715 }
1710 1716
1711 /* MAC HW device detection */ 1717
1712 ret = stmmac_mac_device_setup(ndev); 1718 ret = stmmac_mac_device_setup(ndev);
@@ -1727,16 +1733,12 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1727 goto out; 1733 goto out;
1728 } 1734 }
1729 1735
1730 priv->fix_mac_speed = plat_dat->fix_mac_speed;
1731 priv->bus_setup = plat_dat->bus_setup;
1732 priv->bsp_priv = plat_dat->bsp_priv;
1733
1734 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" 1736 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
1735 "\tIO base addr: 0x%p)\n", ndev->name, pdev->name, 1737 "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
1736 pdev->id, ndev->irq, addr); 1738 pdev->id, ndev->irq, addr);
1737 1739
1738 /* MDIO bus Registration */ 1740 /* MDIO bus Registration */
1739 pr_debug("\tMDIO bus (id: %d)...", priv->bus_id); 1741 pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
1740 ret = stmmac_mdio_register(ndev); 1742 ret = stmmac_mdio_register(ndev);
1741 if (ret < 0) 1743 if (ret < 0)
1742 goto out; 1744 goto out;
@@ -1744,6 +1746,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1744 1746
1745out: 1747out:
1746 if (ret < 0) { 1748 if (ret < 0) {
1749 if (priv->plat->exit)
1750 priv->plat->exit(pdev);
1751
1747 platform_set_drvdata(pdev, NULL); 1752 platform_set_drvdata(pdev, NULL);
1748 release_mem_region(res->start, resource_size(res)); 1753 release_mem_region(res->start, resource_size(res));
1749 if (addr != NULL) 1754 if (addr != NULL)
@@ -1777,6 +1782,9 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1777 1782
1778 stmmac_mdio_unregister(ndev); 1783 stmmac_mdio_unregister(ndev);
1779 1784
1785 if (priv->plat->exit)
1786 priv->plat->exit(pdev);
1787
1780 platform_set_drvdata(pdev, NULL); 1788 platform_set_drvdata(pdev, NULL);
1781 unregister_netdev(ndev); 1789 unregister_netdev(ndev);
1782 1790
@@ -1790,69 +1798,54 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1790} 1798}
1791 1799
1792#ifdef CONFIG_PM 1800#ifdef CONFIG_PM
1793static int stmmac_suspend(struct platform_device *pdev, pm_message_t state) 1801static int stmmac_suspend(struct device *dev)
1794{ 1802{
1795 struct net_device *dev = platform_get_drvdata(pdev); 1803 struct net_device *ndev = dev_get_drvdata(dev);
1796 struct stmmac_priv *priv = netdev_priv(dev); 1804 struct stmmac_priv *priv = netdev_priv(ndev);
1797 int dis_ic = 0; 1805 int dis_ic = 0;
1798 1806
1799 if (!dev || !netif_running(dev)) 1807 if (!ndev || !netif_running(ndev))
1800 return 0; 1808 return 0;
1801 1809
1802 spin_lock(&priv->lock); 1810 spin_lock(&priv->lock);
1803 1811
1804 if (state.event == PM_EVENT_SUSPEND) { 1812 netif_device_detach(ndev);
1805 netif_device_detach(dev); 1813 netif_stop_queue(ndev);
1806 netif_stop_queue(dev); 1814 if (priv->phydev)
1807 if (priv->phydev) 1815 phy_stop(priv->phydev);
1808 phy_stop(priv->phydev);
1809 1816
1810#ifdef CONFIG_STMMAC_TIMER 1817#ifdef CONFIG_STMMAC_TIMER
1811 priv->tm->timer_stop(); 1818 priv->tm->timer_stop();
1812 if (likely(priv->tm->enable)) 1819 if (likely(priv->tm->enable))
1813 dis_ic = 1; 1820 dis_ic = 1;
1814#endif 1821#endif
1815 napi_disable(&priv->napi); 1822 napi_disable(&priv->napi);
1816 1823
1817 /* Stop TX/RX DMA */ 1824 /* Stop TX/RX DMA */
1818 priv->hw->dma->stop_tx(priv->ioaddr); 1825 priv->hw->dma->stop_tx(priv->ioaddr);
1819 priv->hw->dma->stop_rx(priv->ioaddr); 1826 priv->hw->dma->stop_rx(priv->ioaddr);
1820 /* Clear the Rx/Tx descriptors */ 1827 /* Clear the Rx/Tx descriptors */
1821 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size, 1828 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
1822 dis_ic); 1829 dis_ic);
1823 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 1830 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
1824 1831
1825 /* Enable Power down mode by programming the PMT regs */ 1832 /* Enable Power down mode by programming the PMT regs */
1826 if (device_can_wakeup(priv->device)) 1833 if (device_may_wakeup(priv->device))
1827 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); 1834 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
1828 else 1835 else
1829 stmmac_disable_mac(priv->ioaddr); 1836 stmmac_disable_mac(priv->ioaddr);
1830 } else {
1831 priv->shutdown = 1;
1832 /* Although this can appear slightly redundant it actually
1833 * makes fast the standby operation and guarantees the driver
1834 * working if hibernation is on media. */
1835 stmmac_release(dev);
1836 }
1837 1837
1838 spin_unlock(&priv->lock); 1838 spin_unlock(&priv->lock);
1839 return 0; 1839 return 0;
1840} 1840}
1841 1841
1842static int stmmac_resume(struct platform_device *pdev) 1842static int stmmac_resume(struct device *dev)
1843{ 1843{
1844 struct net_device *dev = platform_get_drvdata(pdev); 1844 struct net_device *ndev = dev_get_drvdata(dev);
1845 struct stmmac_priv *priv = netdev_priv(dev); 1845 struct stmmac_priv *priv = netdev_priv(ndev);
1846
1847 if (!netif_running(dev))
1848 return 0;
1849 1846
1850 if (priv->shutdown) { 1847 if (!netif_running(ndev))
1851 /* Re-open the interface and re-init the MAC/DMA
1852 and the rings (i.e. on hibernation stage) */
1853 stmmac_open(dev);
1854 return 0; 1848 return 0;
1855 }
1856 1849
1857 spin_lock(&priv->lock); 1850 spin_lock(&priv->lock);
1858 1851
@@ -1861,10 +1854,10 @@ static int stmmac_resume(struct platform_device *pdev)
1861 * is received. Anyway, it's better to manually clear 1854 * is received. Anyway, it's better to manually clear
1862 * this bit because it can generate problems while resuming 1855 * this bit because it can generate problems while resuming
1863 * from other devices (e.g. serial console). */ 1856 * from other devices (e.g. serial console). */
1864 if (device_can_wakeup(priv->device)) 1857 if (device_may_wakeup(priv->device))
1865 priv->hw->mac->pmt(priv->ioaddr, 0); 1858 priv->hw->mac->pmt(priv->ioaddr, 0);
1866 1859
1867 netif_device_attach(dev); 1860 netif_device_attach(ndev);
1868 1861
1869 /* Enable the MAC and DMA */ 1862 /* Enable the MAC and DMA */
1870 stmmac_enable_mac(priv->ioaddr); 1863 stmmac_enable_mac(priv->ioaddr);
@@ -1872,31 +1865,59 @@ static int stmmac_resume(struct platform_device *pdev)
1872 priv->hw->dma->start_rx(priv->ioaddr); 1865 priv->hw->dma->start_rx(priv->ioaddr);
1873 1866
1874#ifdef CONFIG_STMMAC_TIMER 1867#ifdef CONFIG_STMMAC_TIMER
1875 priv->tm->timer_start(tmrate); 1868 if (likely(priv->tm->enable))
1869 priv->tm->timer_start(tmrate);
1876#endif 1870#endif
1877 napi_enable(&priv->napi); 1871 napi_enable(&priv->napi);
1878 1872
1879 if (priv->phydev) 1873 if (priv->phydev)
1880 phy_start(priv->phydev); 1874 phy_start(priv->phydev);
1881 1875
1882 netif_start_queue(dev); 1876 netif_start_queue(ndev);
1883 1877
1884 spin_unlock(&priv->lock); 1878 spin_unlock(&priv->lock);
1885 return 0; 1879 return 0;
1886} 1880}
1887#endif
1888 1881
1889static struct platform_driver stmmac_driver = { 1882static int stmmac_freeze(struct device *dev)
1890 .driver = { 1883{
1891 .name = STMMAC_RESOURCE_NAME, 1884 struct net_device *ndev = dev_get_drvdata(dev);
1892 }, 1885
1893 .probe = stmmac_dvr_probe, 1886 if (!ndev || !netif_running(ndev))
1894 .remove = stmmac_dvr_remove, 1887 return 0;
1895#ifdef CONFIG_PM 1888
1889 return stmmac_release(ndev);
1890}
1891
1892static int stmmac_restore(struct device *dev)
1893{
1894 struct net_device *ndev = dev_get_drvdata(dev);
1895
1896 if (!ndev || !netif_running(ndev))
1897 return 0;
1898
1899 return stmmac_open(ndev);
1900}
1901
1902static const struct dev_pm_ops stmmac_pm_ops = {
1896 .suspend = stmmac_suspend, 1903 .suspend = stmmac_suspend,
1897 .resume = stmmac_resume, 1904 .resume = stmmac_resume,
1898#endif 1905 .freeze = stmmac_freeze,
1906 .thaw = stmmac_restore,
1907 .restore = stmmac_restore,
1908};
1909#else
1910static const struct dev_pm_ops stmmac_pm_ops;
1911#endif /* CONFIG_PM */
1899 1912
1913static struct platform_driver stmmac_driver = {
1914 .probe = stmmac_dvr_probe,
1915 .remove = stmmac_dvr_remove,
1916 .driver = {
1917 .name = STMMAC_RESOURCE_NAME,
1918 .owner = THIS_MODULE,
1919 .pm = &stmmac_pm_ops,
1920 },
1900}; 1921};
1901 1922
1902/** 1923/**
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index d7441616357..234b4068a1f 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -53,7 +53,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
53 int data; 53 int data;
54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
55 ((phyreg << 6) & (0x000007C0))); 55 ((phyreg << 6) & (0x000007C0)));
56 regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2); 56 regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
57 57
58 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 58 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
59 writel(regValue, priv->ioaddr + mii_address); 59 writel(regValue, priv->ioaddr + mii_address);
@@ -85,7 +85,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
85 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 85 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
86 | MII_WRITE; 86 | MII_WRITE;
87 87
88 value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2); 88 value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
89 89
90 90
91 /* Wait until any existing MII operation is complete */ 91 /* Wait until any existing MII operation is complete */
@@ -114,7 +114,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
114 114
115 if (priv->phy_reset) { 115 if (priv->phy_reset) {
116 pr_debug("stmmac_mdio_reset: calling phy_reset\n"); 116 pr_debug("stmmac_mdio_reset: calling phy_reset\n");
117 priv->phy_reset(priv->bsp_priv); 117 priv->phy_reset(priv->plat->bsp_priv);
118 } 118 }
119 119
120 /* This is a workaround for problems with the STE101P PHY. 120 /* This is a workaround for problems with the STE101P PHY.
@@ -157,7 +157,7 @@ int stmmac_mdio_register(struct net_device *ndev)
157 new_bus->read = &stmmac_mdio_read; 157 new_bus->read = &stmmac_mdio_read;
158 new_bus->write = &stmmac_mdio_write; 158 new_bus->write = &stmmac_mdio_write;
159 new_bus->reset = &stmmac_mdio_reset; 159 new_bus->reset = &stmmac_mdio_reset;
160 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id); 160 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
161 new_bus->priv = ndev; 161 new_bus->priv = ndev;
162 new_bus->irq = irqlist; 162 new_bus->irq = irqlist;
163 new_bus->phy_mask = priv->phy_mask; 163 new_bus->phy_mask = priv->phy_mask;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 30ccbb6d097..5faa87d86c6 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -32,6 +32,7 @@
32#include <linux/etherdevice.h> 32#include <linux/etherdevice.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/mdio.h>
35#include <linux/mii.h> 36#include <linux/mii.h>
36#include <linux/phy.h> 37#include <linux/phy.h>
37#include <linux/brcmphy.h> 38#include <linux/brcmphy.h>
@@ -69,10 +70,10 @@
69 70
70#define DRV_MODULE_NAME "tg3" 71#define DRV_MODULE_NAME "tg3"
71#define TG3_MAJ_NUM 3 72#define TG3_MAJ_NUM 3
72#define TG3_MIN_NUM 115 73#define TG3_MIN_NUM 116
73#define DRV_MODULE_VERSION \ 74#define DRV_MODULE_VERSION \
74 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 75 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
75#define DRV_MODULE_RELDATE "October 14, 2010" 76#define DRV_MODULE_RELDATE "December 3, 2010"
76 77
77#define TG3_DEF_MAC_MODE 0 78#define TG3_DEF_MAC_MODE 0
78#define TG3_DEF_RX_MODE 0 79#define TG3_DEF_RX_MODE 0
@@ -1769,9 +1770,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1769 1770
1770 if (tp->link_config.autoneg == AUTONEG_ENABLE && 1771 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1771 current_link_up == 1 && 1772 current_link_up == 1 &&
1772 (tp->link_config.active_speed == SPEED_1000 || 1773 tp->link_config.active_duplex == DUPLEX_FULL &&
1773 (tp->link_config.active_speed == SPEED_100 && 1774 (tp->link_config.active_speed == SPEED_100 ||
1774 tp->link_config.active_duplex == DUPLEX_FULL))) { 1775 tp->link_config.active_speed == SPEED_1000)) {
1775 u32 eeectl; 1776 u32 eeectl;
1776 1777
1777 if (tp->link_config.active_speed == SPEED_1000) 1778 if (tp->link_config.active_speed == SPEED_1000)
@@ -1781,7 +1782,8 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1781 1782
1782 tw32(TG3_CPMU_EEE_CTRL, eeectl); 1783 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1783 1784
1784 tg3_phy_cl45_read(tp, 0x7, TG3_CL45_D7_EEERES_STAT, &val); 1785 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1786 TG3_CL45_D7_EEERES_STAT, &val);
1785 1787
1786 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 1788 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1787 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) 1789 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
@@ -2728,12 +2730,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2728 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))) 2730 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2729 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 2731 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2730 2732
2731 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 2733 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
2732 mac_mode |= tp->mac_mode & 2734 mac_mode |= MAC_MODE_APE_TX_EN |
2733 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); 2735 MAC_MODE_APE_RX_EN |
2734 if (mac_mode & MAC_MODE_APE_TX_EN) 2736 MAC_MODE_TDE_ENABLE;
2735 mac_mode |= MAC_MODE_TDE_ENABLE;
2736 }
2737 2737
2738 tw32_f(MAC_MODE, mac_mode); 2738 tw32_f(MAC_MODE, mac_mode);
2739 udelay(100); 2739 udelay(100);
@@ -2969,7 +2969,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2969 } 2969 }
2970 2970
2971 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { 2971 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
2972 u32 val = 0; 2972 u32 val;
2973 2973
2974 tw32(TG3_CPMU_EEE_MODE, 2974 tw32(TG3_CPMU_EEE_MODE,
2975 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2975 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
@@ -2986,19 +2986,18 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2986 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, 2986 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
2987 val | MII_TG3_DSP_CH34TP2_HIBW01); 2987 val | MII_TG3_DSP_CH34TP2_HIBW01);
2988 2988
2989 val = 0;
2989 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 2990 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2990 /* Advertise 100-BaseTX EEE ability */ 2991 /* Advertise 100-BaseTX EEE ability */
2991 if (tp->link_config.advertising & 2992 if (tp->link_config.advertising &
2992 (ADVERTISED_100baseT_Half | 2993 ADVERTISED_100baseT_Full)
2993 ADVERTISED_100baseT_Full)) 2994 val |= MDIO_AN_EEE_ADV_100TX;
2994 val |= TG3_CL45_D7_EEEADV_CAP_100TX;
2995 /* Advertise 1000-BaseT EEE ability */ 2995 /* Advertise 1000-BaseT EEE ability */
2996 if (tp->link_config.advertising & 2996 if (tp->link_config.advertising &
2997 (ADVERTISED_1000baseT_Half | 2997 ADVERTISED_1000baseT_Full)
2998 ADVERTISED_1000baseT_Full)) 2998 val |= MDIO_AN_EEE_ADV_1000T;
2999 val |= TG3_CL45_D7_EEEADV_CAP_1000T;
3000 } 2999 }
3001 tg3_phy_cl45_write(tp, 0x7, TG3_CL45_D7_EEEADV_CAP, val); 3000 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3002 3001
3003 /* Turn off SM_DSP clock. */ 3002 /* Turn off SM_DSP clock. */
3004 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | 3003 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
@@ -5763,7 +5762,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5763 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 5762 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5764 5763
5765 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5764 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5766 !mss && skb->len > ETH_DATA_LEN) 5765 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5767 base_flags |= TXD_FLAG_JMB_PKT; 5766 base_flags |= TXD_FLAG_JMB_PKT;
5768 5767
5769 tg3_set_txd(tnapi, entry, mapping, len, base_flags, 5768 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5997,7 +5996,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5997#endif 5996#endif
5998 5997
5999 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5998 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
6000 !mss && skb->len > ETH_DATA_LEN) 5999 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6001 base_flags |= TXD_FLAG_JMB_PKT; 6000 base_flags |= TXD_FLAG_JMB_PKT;
6002 6001
6003 len = skb_headlen(skb); 6002 len = skb_headlen(skb);
@@ -6339,13 +6338,13 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
6339 kfree(tpr->rx_jmb_buffers); 6338 kfree(tpr->rx_jmb_buffers);
6340 tpr->rx_jmb_buffers = NULL; 6339 tpr->rx_jmb_buffers = NULL;
6341 if (tpr->rx_std) { 6340 if (tpr->rx_std) {
6342 pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp), 6341 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6343 tpr->rx_std, tpr->rx_std_mapping); 6342 tpr->rx_std, tpr->rx_std_mapping);
6344 tpr->rx_std = NULL; 6343 tpr->rx_std = NULL;
6345 } 6344 }
6346 if (tpr->rx_jmb) { 6345 if (tpr->rx_jmb) {
6347 pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp), 6346 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6348 tpr->rx_jmb, tpr->rx_jmb_mapping); 6347 tpr->rx_jmb, tpr->rx_jmb_mapping);
6349 tpr->rx_jmb = NULL; 6348 tpr->rx_jmb = NULL;
6350 } 6349 }
6351} 6350}
@@ -6358,8 +6357,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
6358 if (!tpr->rx_std_buffers) 6357 if (!tpr->rx_std_buffers)
6359 return -ENOMEM; 6358 return -ENOMEM;
6360 6359
6361 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp), 6360 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6362 &tpr->rx_std_mapping); 6361 TG3_RX_STD_RING_BYTES(tp),
6362 &tpr->rx_std_mapping,
6363 GFP_KERNEL);
6363 if (!tpr->rx_std) 6364 if (!tpr->rx_std)
6364 goto err_out; 6365 goto err_out;
6365 6366
@@ -6370,9 +6371,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
6370 if (!tpr->rx_jmb_buffers) 6371 if (!tpr->rx_jmb_buffers)
6371 goto err_out; 6372 goto err_out;
6372 6373
6373 tpr->rx_jmb = pci_alloc_consistent(tp->pdev, 6374 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6374 TG3_RX_JMB_RING_BYTES(tp), 6375 TG3_RX_JMB_RING_BYTES(tp),
6375 &tpr->rx_jmb_mapping); 6376 &tpr->rx_jmb_mapping,
6377 GFP_KERNEL);
6376 if (!tpr->rx_jmb) 6378 if (!tpr->rx_jmb)
6377 goto err_out; 6379 goto err_out;
6378 } 6380 }
@@ -6491,7 +6493,7 @@ static void tg3_free_consistent(struct tg3 *tp)
6491 struct tg3_napi *tnapi = &tp->napi[i]; 6493 struct tg3_napi *tnapi = &tp->napi[i];
6492 6494
6493 if (tnapi->tx_ring) { 6495 if (tnapi->tx_ring) {
6494 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, 6496 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6495 tnapi->tx_ring, tnapi->tx_desc_mapping); 6497 tnapi->tx_ring, tnapi->tx_desc_mapping);
6496 tnapi->tx_ring = NULL; 6498 tnapi->tx_ring = NULL;
6497 } 6499 }
@@ -6500,25 +6502,26 @@ static void tg3_free_consistent(struct tg3 *tp)
6500 tnapi->tx_buffers = NULL; 6502 tnapi->tx_buffers = NULL;
6501 6503
6502 if (tnapi->rx_rcb) { 6504 if (tnapi->rx_rcb) {
6503 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), 6505 dma_free_coherent(&tp->pdev->dev,
6504 tnapi->rx_rcb, 6506 TG3_RX_RCB_RING_BYTES(tp),
6505 tnapi->rx_rcb_mapping); 6507 tnapi->rx_rcb,
6508 tnapi->rx_rcb_mapping);
6506 tnapi->rx_rcb = NULL; 6509 tnapi->rx_rcb = NULL;
6507 } 6510 }
6508 6511
6509 tg3_rx_prodring_fini(tp, &tnapi->prodring); 6512 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6510 6513
6511 if (tnapi->hw_status) { 6514 if (tnapi->hw_status) {
6512 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, 6515 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6513 tnapi->hw_status, 6516 tnapi->hw_status,
6514 tnapi->status_mapping); 6517 tnapi->status_mapping);
6515 tnapi->hw_status = NULL; 6518 tnapi->hw_status = NULL;
6516 } 6519 }
6517 } 6520 }
6518 6521
6519 if (tp->hw_stats) { 6522 if (tp->hw_stats) {
6520 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), 6523 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6521 tp->hw_stats, tp->stats_mapping); 6524 tp->hw_stats, tp->stats_mapping);
6522 tp->hw_stats = NULL; 6525 tp->hw_stats = NULL;
6523 } 6526 }
6524} 6527}
@@ -6531,9 +6534,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6531{ 6534{
6532 int i; 6535 int i;
6533 6536
6534 tp->hw_stats = pci_alloc_consistent(tp->pdev, 6537 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6535 sizeof(struct tg3_hw_stats), 6538 sizeof(struct tg3_hw_stats),
6536 &tp->stats_mapping); 6539 &tp->stats_mapping,
6540 GFP_KERNEL);
6537 if (!tp->hw_stats) 6541 if (!tp->hw_stats)
6538 goto err_out; 6542 goto err_out;
6539 6543
@@ -6543,9 +6547,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6543 struct tg3_napi *tnapi = &tp->napi[i]; 6547 struct tg3_napi *tnapi = &tp->napi[i];
6544 struct tg3_hw_status *sblk; 6548 struct tg3_hw_status *sblk;
6545 6549
6546 tnapi->hw_status = pci_alloc_consistent(tp->pdev, 6550 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6547 TG3_HW_STATUS_SIZE, 6551 TG3_HW_STATUS_SIZE,
6548 &tnapi->status_mapping); 6552 &tnapi->status_mapping,
6553 GFP_KERNEL);
6549 if (!tnapi->hw_status) 6554 if (!tnapi->hw_status)
6550 goto err_out; 6555 goto err_out;
6551 6556
@@ -6566,9 +6571,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6566 if (!tnapi->tx_buffers) 6571 if (!tnapi->tx_buffers)
6567 goto err_out; 6572 goto err_out;
6568 6573
6569 tnapi->tx_ring = pci_alloc_consistent(tp->pdev, 6574 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6570 TG3_TX_RING_BYTES, 6575 TG3_TX_RING_BYTES,
6571 &tnapi->tx_desc_mapping); 6576 &tnapi->tx_desc_mapping,
6577 GFP_KERNEL);
6572 if (!tnapi->tx_ring) 6578 if (!tnapi->tx_ring)
6573 goto err_out; 6579 goto err_out;
6574 } 6580 }
@@ -6601,9 +6607,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6601 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) 6607 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6602 continue; 6608 continue;
6603 6609
6604 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev, 6610 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6605 TG3_RX_RCB_RING_BYTES(tp), 6611 TG3_RX_RCB_RING_BYTES(tp),
6606 &tnapi->rx_rcb_mapping); 6612 &tnapi->rx_rcb_mapping,
6613 GFP_KERNEL);
6607 if (!tnapi->rx_rcb) 6614 if (!tnapi->rx_rcb)
6608 goto err_out; 6615 goto err_out;
6609 6616
@@ -6987,7 +6994,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
6987 6994
6988 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { 6995 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6989 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 6996 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6990 pcie_set_readrq(tp->pdev, 4096); 6997 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
6991 else { 6998 else {
6992 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 6999 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6993 tp->pci_cacheline_sz); 7000 tp->pci_cacheline_sz);
@@ -7181,7 +7188,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7181 tp->pcie_cap + PCI_EXP_DEVCTL, 7188 tp->pcie_cap + PCI_EXP_DEVCTL,
7182 val16); 7189 val16);
7183 7190
7184 pcie_set_readrq(tp->pdev, 4096); 7191 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7185 7192
7186 /* Clear error status */ 7193 /* Clear error status */
7187 pci_write_config_word(tp->pdev, 7194 pci_write_config_word(tp->pdev,
@@ -7222,19 +7229,21 @@ static int tg3_chip_reset(struct tg3 *tp)
7222 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 7229 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7223 } 7230 }
7224 7231
7232 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7233 tp->mac_mode = MAC_MODE_APE_TX_EN |
7234 MAC_MODE_APE_RX_EN |
7235 MAC_MODE_TDE_ENABLE;
7236
7225 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 7237 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7226 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 7238 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7227 tw32_f(MAC_MODE, tp->mac_mode); 7239 val = tp->mac_mode;
7228 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 7240 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7229 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 7241 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7230 tw32_f(MAC_MODE, tp->mac_mode); 7242 val = tp->mac_mode;
7231 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7232 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
7233 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
7234 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
7235 tw32_f(MAC_MODE, tp->mac_mode);
7236 } else 7243 } else
7237 tw32_f(MAC_MODE, 0); 7244 val = 0;
7245
7246 tw32_f(MAC_MODE, val);
7238 udelay(40); 7247 udelay(40);
7239 7248
7240 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 7249 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
@@ -7801,6 +7810,37 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7801 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) 7810 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7802 tg3_abort_hw(tp, 1); 7811 tg3_abort_hw(tp, 1);
7803 7812
7813 /* Enable MAC control of LPI */
7814 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7815 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7816 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7817 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7818
7819 tw32_f(TG3_CPMU_EEE_CTRL,
7820 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7821
7822 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7823 TG3_CPMU_EEEMD_LPI_IN_TX |
7824 TG3_CPMU_EEEMD_LPI_IN_RX |
7825 TG3_CPMU_EEEMD_EEE_ENABLE;
7826
7827 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7828 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7829
7830 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7831 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7832
7833 tw32_f(TG3_CPMU_EEE_MODE, val);
7834
7835 tw32_f(TG3_CPMU_EEE_DBTMR1,
7836 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7837 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7838
7839 tw32_f(TG3_CPMU_EEE_DBTMR2,
7840 TG3_CPMU_DBTMR1_APE_TX_2047US |
7841 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7842 }
7843
7804 if (reset_phy) 7844 if (reset_phy)
7805 tg3_phy_reset(tp); 7845 tg3_phy_reset(tp);
7806 7846
@@ -7860,18 +7900,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7860 tw32(GRC_MODE, grc_mode); 7900 tw32(GRC_MODE, grc_mode);
7861 } 7901 }
7862 7902
7863 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { 7903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7864 u32 grc_mode = tr32(GRC_MODE); 7904 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7905 u32 grc_mode = tr32(GRC_MODE);
7865 7906
7866 /* Access the lower 1K of PL PCIE block registers. */ 7907 /* Access the lower 1K of PL PCIE block registers. */
7867 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 7908 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7868 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 7909 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7869 7910
7870 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5); 7911 val = tr32(TG3_PCIE_TLDLPL_PORT +
7871 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 7912 TG3_PCIE_PL_LO_PHYCTL5);
7872 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 7913 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7914 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7873 7915
7874 tw32(GRC_MODE, grc_mode); 7916 tw32(GRC_MODE, grc_mode);
7917 }
7875 7918
7876 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 7919 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7877 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 7920 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
@@ -7879,22 +7922,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7879 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 7922 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7880 } 7923 }
7881 7924
7882 /* Enable MAC control of LPI */
7883 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7884 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7885 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7886 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7887
7888 tw32_f(TG3_CPMU_EEE_CTRL,
7889 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7890
7891 tw32_f(TG3_CPMU_EEE_MODE,
7892 TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7893 TG3_CPMU_EEEMD_LPI_IN_TX |
7894 TG3_CPMU_EEEMD_LPI_IN_RX |
7895 TG3_CPMU_EEEMD_EEE_ENABLE);
7896 }
7897
7898 /* This works around an issue with Athlon chipsets on 7925 /* This works around an issue with Athlon chipsets on
7899 * B3 tigon3 silicon. This bit has no effect on any 7926 * B3 tigon3 silicon. This bit has no effect on any
7900 * other revision. But do not set this on PCI Express 7927 * other revision. But do not set this on PCI Express
@@ -8162,8 +8189,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8162 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 8189 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8163 RDMAC_MODE_LNGREAD_ENAB); 8190 RDMAC_MODE_LNGREAD_ENAB);
8164 8191
8165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8167 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 8193 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8168 8194
8169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 8195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8203,6 +8229,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8203 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 8229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8204 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 8230 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8205 val = tr32(TG3_RDMA_RSRVCTRL_REG); 8231 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8232 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8233 val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
8234 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
8235 }
8206 tw32(TG3_RDMA_RSRVCTRL_REG, 8236 tw32(TG3_RDMA_RSRVCTRL_REG,
8207 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 8237 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8208 } 8238 }
@@ -8280,7 +8310,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8280 } 8310 }
8281 8311
8282 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 8312 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8283 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 8313 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8284 else 8314 else
8285 tp->mac_mode = 0; 8315 tp->mac_mode = 0;
8286 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 8316 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
@@ -9031,8 +9061,14 @@ static bool tg3_enable_msix(struct tg3 *tp)
9031 pci_disable_msix(tp->pdev); 9061 pci_disable_msix(tp->pdev);
9032 return false; 9062 return false;
9033 } 9063 }
9034 if (tp->irq_cnt > 1) 9064
9065 if (tp->irq_cnt > 1) {
9035 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; 9066 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
9067 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9068 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
9069 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9070 }
9071 }
9036 9072
9037 return true; 9073 return true;
9038} 9074}
@@ -12411,8 +12447,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12411 if (cfg2 & (1 << 18)) 12447 if (cfg2 & (1 << 18))
12412 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 12448 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12413 12449
12414 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 12450 if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) ||
12415 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && 12451 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12452 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
12416 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 12453 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12417 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 12454 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12418 12455
@@ -12548,9 +12585,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12548 } 12585 }
12549 } 12586 }
12550 12587
12551 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 12588 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12552 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && 12589 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12553 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)) 12590 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12591 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12592 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12554 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 12593 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12555 12594
12556 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 12595 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
@@ -13359,7 +13398,45 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13359 13398
13360 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13399 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13361 13400
13362 pcie_set_readrq(tp->pdev, 4096); 13401 tp->pcie_readrq = 4096;
13402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13403 u16 word;
13404
13405 pci_read_config_word(tp->pdev,
13406 tp->pcie_cap + PCI_EXP_LNKSTA,
13407 &word);
13408 switch (word & PCI_EXP_LNKSTA_CLS) {
13409 case PCI_EXP_LNKSTA_CLS_2_5GB:
13410 word &= PCI_EXP_LNKSTA_NLW;
13411 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13412 switch (word) {
13413 case 2:
13414 tp->pcie_readrq = 2048;
13415 break;
13416 case 4:
13417 tp->pcie_readrq = 1024;
13418 break;
13419 }
13420 break;
13421
13422 case PCI_EXP_LNKSTA_CLS_5_0GB:
13423 word &= PCI_EXP_LNKSTA_NLW;
13424 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13425 switch (word) {
13426 case 1:
13427 tp->pcie_readrq = 2048;
13428 break;
13429 case 2:
13430 tp->pcie_readrq = 1024;
13431 break;
13432 case 4:
13433 tp->pcie_readrq = 512;
13434 break;
13435 }
13436 }
13437 }
13438
13439 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13363 13440
13364 pci_read_config_word(tp->pdev, 13441 pci_read_config_word(tp->pdev,
13365 tp->pcie_cap + PCI_EXP_LNKCTL, 13442 tp->pcie_cap + PCI_EXP_LNKCTL,
@@ -13722,8 +13799,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13722 13799
13723 /* Preserve the APE MAC_MODE bits */ 13800 /* Preserve the APE MAC_MODE bits */
13724 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 13801 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13725 tp->mac_mode = tr32(MAC_MODE) | 13802 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13726 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13727 else 13803 else
13728 tp->mac_mode = TG3_DEF_MAC_MODE; 13804 tp->mac_mode = TG3_DEF_MAC_MODE;
13729 13805
@@ -14159,7 +14235,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14159 u32 *buf, saved_dma_rwctrl; 14235 u32 *buf, saved_dma_rwctrl;
14160 int ret = 0; 14236 int ret = 0;
14161 14237
14162 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 14238 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14239 &buf_dma, GFP_KERNEL);
14163 if (!buf) { 14240 if (!buf) {
14164 ret = -ENOMEM; 14241 ret = -ENOMEM;
14165 goto out_nofree; 14242 goto out_nofree;
@@ -14343,7 +14420,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14343 } 14420 }
14344 14421
14345out: 14422out:
14346 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); 14423 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14347out_nofree: 14424out_nofree:
14348 return ret; 14425 return ret;
14349} 14426}
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4a1974804b9..d62c8d937c8 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1094,13 +1094,19 @@
1094/* 0x3664 --> 0x36b0 unused */ 1094/* 0x3664 --> 0x36b0 unused */
1095 1095
1096#define TG3_CPMU_EEE_MODE 0x000036b0 1096#define TG3_CPMU_EEE_MODE 0x000036b0
1097#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008 1097#define TG3_CPMU_EEEMD_APE_TX_DET_EN 0x00000004
1098#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080 1098#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008
1099#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100 1099#define TG3_CPMU_EEEMD_SND_IDX_DET_EN 0x00000040
1100#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200 1100#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080
1101#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000 1101#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100
1102/* 0x36b4 --> 0x36b8 unused */ 1102#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200
1103 1103#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000
1104#define TG3_CPMU_EEE_DBTMR1 0x000036b4
1105#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
1106#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
1107#define TG3_CPMU_EEE_DBTMR2 0x000036b8
1108#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000
1109#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
1104#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc 1110#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
1105#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 1111#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
1106#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004 1112#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
@@ -1327,6 +1333,8 @@
1327 1333
1328#define TG3_RDMA_RSRVCTRL_REG 0x00004900 1334#define TG3_RDMA_RSRVCTRL_REG 0x00004900
1329#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 1335#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
1336#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
1337#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
1330/* 0x4904 --> 0x4910 unused */ 1338/* 0x4904 --> 0x4910 unused */
1331 1339
1332#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 1340#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
@@ -2170,9 +2178,6 @@
2170#define MII_TG3_TEST1_CRC_EN 0x8000 2178#define MII_TG3_TEST1_CRC_EN 0x8000
2171 2179
2172/* Clause 45 expansion registers */ 2180/* Clause 45 expansion registers */
2173#define TG3_CL45_D7_EEEADV_CAP 0x003c
2174#define TG3_CL45_D7_EEEADV_CAP_100TX 0x0002
2175#define TG3_CL45_D7_EEEADV_CAP_1000T 0x0004
2176#define TG3_CL45_D7_EEERES_STAT 0x803e 2181#define TG3_CL45_D7_EEERES_STAT 0x803e
2177#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002 2182#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002
2178#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004 2183#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
@@ -2562,10 +2567,6 @@ struct ring_info {
2562 DEFINE_DMA_UNMAP_ADDR(mapping); 2567 DEFINE_DMA_UNMAP_ADDR(mapping);
2563}; 2568};
2564 2569
2565struct tg3_config_info {
2566 u32 flags;
2567};
2568
2569struct tg3_link_config { 2570struct tg3_link_config {
2570 /* Describes what we're trying to get. */ 2571 /* Describes what we're trying to get. */
2571 u32 advertising; 2572 u32 advertising;
@@ -2713,17 +2714,17 @@ struct tg3_napi {
2713 u32 last_irq_tag; 2714 u32 last_irq_tag;
2714 u32 int_mbox; 2715 u32 int_mbox;
2715 u32 coal_now; 2716 u32 coal_now;
2716 u32 tx_prod;
2717 u32 tx_cons;
2718 u32 tx_pending;
2719 u32 prodmbox;
2720 2717
2721 u32 consmbox; 2718 u32 consmbox ____cacheline_aligned;
2722 u32 rx_rcb_ptr; 2719 u32 rx_rcb_ptr;
2723 u16 *rx_rcb_prod_idx; 2720 u16 *rx_rcb_prod_idx;
2724 struct tg3_rx_prodring_set prodring; 2721 struct tg3_rx_prodring_set prodring;
2725
2726 struct tg3_rx_buffer_desc *rx_rcb; 2722 struct tg3_rx_buffer_desc *rx_rcb;
2723
2724 u32 tx_prod ____cacheline_aligned;
2725 u32 tx_cons;
2726 u32 tx_pending;
2727 u32 prodmbox;
2727 struct tg3_tx_buffer_desc *tx_ring; 2728 struct tg3_tx_buffer_desc *tx_ring;
2728 struct ring_info *tx_buffers; 2729 struct ring_info *tx_buffers;
2729 2730
@@ -2946,6 +2947,7 @@ struct tg3 {
2946 int pcix_cap; 2947 int pcix_cap;
2947 int pcie_cap; 2948 int pcie_cap;
2948 }; 2949 };
2950 int pcie_readrq;
2949 2951
2950 struct mii_bus *mdio_bus; 2952 struct mii_bus *mdio_bus;
2951 int mdio_irq[PHY_MAX_ADDR]; 2953 int mdio_irq[PHY_MAX_ADDR];
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 52ffabe6db0..6f600cced6e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -196,6 +196,25 @@ config USB_NET_CDC_EEM
196 IEEE 802 "local assignment" bit is set in the address, a "usbX" 196 IEEE 802 "local assignment" bit is set in the address, a "usbX"
197 name is used instead. 197 name is used instead.
198 198
199config USB_NET_CDC_NCM
200 tristate "CDC NCM support"
201 depends on USB_USBNET
202 default y
203 help
204 This driver provides support for CDC NCM (Network Control Model
205 Device USB Class Specification). The CDC NCM specification is
206 available from <http://www.usb.org/>.
207
208 Say "y" to link the driver statically, or "m" to build a
209 dynamically linked module.
210
211 This driver should work with at least the following devices:
212 * ST-Ericsson M700 LTE FDD/TDD Mobile Broadband Modem (ref. design)
213 * ST-Ericsson M5730 HSPA+ Mobile Broadband Modem (reference design)
214 * ST-Ericsson M570 HSPA+ Mobile Broadband Modem (reference design)
215 * ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
216 * Ericsson F5521gw Mobile Broadband Module
217
199config USB_NET_DM9601 218config USB_NET_DM9601
200 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" 219 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
201 depends on USB_USBNET 220 depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index a19b0259ae1..cac17030118 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -26,4 +26,5 @@ obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
26obj-$(CONFIG_USB_IPHETH) += ipheth.o 26obj-$(CONFIG_USB_IPHETH) += ipheth.o
27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o 27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o 28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
29obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
29 30
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
new file mode 100644
index 00000000000..593c104ab19
--- /dev/null
+++ b/drivers/net/usb/cdc_ncm.c
@@ -0,0 +1,1213 @@
1/*
2 * cdc_ncm.c
3 *
4 * Copyright (C) ST-Ericsson 2010
5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
7 *
8 * USB Host Driver for Network Control Model (NCM)
9 * http://www.usb.org/developers/devclass_docs/NCM10.zip
10 *
11 * The NCM encoding, decoding and initialization logic
12 * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
13 *
14 * This software is available to you under a choice of one of two
15 * licenses. You may choose this file to be licensed under the terms
16 * of the GNU General Public License (GPL) Version 2 or the 2-clause
17 * BSD license listed below:
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
21 * are met:
22 * 1. Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * 2. Redistributions in binary form must reproduce the above copyright
25 * notice, this list of conditions and the following disclaimer in the
26 * documentation and/or other materials provided with the distribution.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 */
40
41#include <linux/module.h>
42#include <linux/init.h>
43#include <linux/netdevice.h>
44#include <linux/ctype.h>
45#include <linux/ethtool.h>
46#include <linux/workqueue.h>
47#include <linux/mii.h>
48#include <linux/crc32.h>
49#include <linux/usb.h>
50#include <linux/version.h>
51#include <linux/timer.h>
52#include <linux/spinlock.h>
53#include <linux/atomic.h>
54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h>
56
57#define DRIVER_VERSION "30-Nov-2010"
58
59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
61
62/* Maximum NTB length */
63#define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */
64#define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */
65
66/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
67#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
68
69#define CDC_NCM_MIN_TX_PKT 512 /* bytes */
70
71/* Default value for MaxDatagramSize */
72#define CDC_NCM_MAX_DATAGRAM_SIZE 2048 /* bytes */
73
74/*
75 * Maximum amount of datagrams in NCM Datagram Pointer Table, not counting
76 * the last NULL entry. Any additional datagrams in NTB would be discarded.
77 */
78#define CDC_NCM_DPT_DATAGRAMS_MAX 32
79
80/* Restart the timer, if amount of datagrams is less than given value */
81#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
82
83/* The following macro defines the minimum header space */
84#define CDC_NCM_MIN_HDR_SIZE \
85 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
86 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
87
88struct connection_speed_change {
89 __le32 USBitRate; /* holds 3GPP downlink value, bits per second */
90 __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */
91} __attribute__ ((packed));
92
93struct cdc_ncm_data {
94 struct usb_cdc_ncm_nth16 nth16;
95 struct usb_cdc_ncm_ndp16 ndp16;
96 struct usb_cdc_ncm_dpe16 dpe16[CDC_NCM_DPT_DATAGRAMS_MAX + 1];
97};
98
99struct cdc_ncm_ctx {
100 struct cdc_ncm_data rx_ncm;
101 struct cdc_ncm_data tx_ncm;
102 struct usb_cdc_ncm_ntb_parameters ncm_parm;
103 struct timer_list tx_timer;
104
105 const struct usb_cdc_ncm_desc *func_desc;
106 const struct usb_cdc_header_desc *header_desc;
107 const struct usb_cdc_union_desc *union_desc;
108 const struct usb_cdc_ether_desc *ether_desc;
109
110 struct net_device *netdev;
111 struct usb_device *udev;
112 struct usb_host_endpoint *in_ep;
113 struct usb_host_endpoint *out_ep;
114 struct usb_host_endpoint *status_ep;
115 struct usb_interface *intf;
116 struct usb_interface *control;
117 struct usb_interface *data;
118
119 struct sk_buff *tx_curr_skb;
120 struct sk_buff *tx_rem_skb;
121
122 spinlock_t mtx;
123
124 u32 tx_timer_pending;
125 u32 tx_curr_offset;
126 u32 tx_curr_last_offset;
127 u32 tx_curr_frame_num;
128 u32 rx_speed;
129 u32 tx_speed;
130 u32 rx_max;
131 u32 tx_max;
132 u32 max_datagram_size;
133 u16 tx_max_datagrams;
134 u16 tx_remainder;
135 u16 tx_modulus;
136 u16 tx_ndp_modulus;
137 u16 tx_seq;
138 u16 connected;
139 u8 data_claimed;
140 u8 control_claimed;
141};
142
143static void cdc_ncm_tx_timeout(unsigned long arg);
144static const struct driver_info cdc_ncm_info;
145static struct usb_driver cdc_ncm_driver;
146static struct ethtool_ops cdc_ncm_ethtool_ops;
147
148static const struct usb_device_id cdc_devs[] = {
149 { USB_INTERFACE_INFO(USB_CLASS_COMM,
150 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
151 .driver_info = (unsigned long)&cdc_ncm_info,
152 },
153 {
154 },
155};
156
157MODULE_DEVICE_TABLE(usb, cdc_devs);
158
159static void
160cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
161{
162 struct usbnet *dev = netdev_priv(net);
163
164 strncpy(info->driver, dev->driver_name, sizeof(info->driver));
165 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
166 strncpy(info->fw_version, dev->driver_info->description,
167 sizeof(info->fw_version));
168 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
169}
170
171static int
172cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
173 void *data, u16 flags, u16 *actlen, u16 timeout)
174{
175 int err;
176
177 err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
178 usb_rcvctrlpipe(ctx->udev, 0) :
179 usb_sndctrlpipe(ctx->udev, 0),
180 req->bNotificationType, req->bmRequestType,
181 req->wValue,
182 req->wIndex, data,
183 req->wLength, timeout);
184
185 if (err < 0) {
186 if (actlen)
187 *actlen = 0;
188 return err;
189 }
190
191 if (actlen)
192 *actlen = err;
193
194 return 0;
195}
196
197static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
198{
199 struct usb_cdc_notification req;
200 u32 val;
201 __le16 max_datagram_size;
202 u8 flags;
203 u8 iface_no;
204 int err;
205
206 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
207
208 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
209 req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS;
210 req.wValue = 0;
211 req.wIndex = cpu_to_le16(iface_no);
212 req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm));
213
214 err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000);
215 if (err) {
216 pr_debug("failed GET_NTB_PARAMETERS\n");
217 return 1;
218 }
219
220 /* read correct set of parameters according to device mode */
221 ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
222 ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
223 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
224 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
225 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
226
227 if (ctx->func_desc != NULL)
228 flags = ctx->func_desc->bmNetworkCapabilities;
229 else
230 flags = 0;
231
232 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
233 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
234 "wNdpOutAlignment=%u flags=0x%x\n",
235 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
236 ctx->tx_ndp_modulus, flags);
237
238 /* max count of tx datagrams without terminating NULL entry */
239 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
240
241 /* verify maximum size of received NTB in bytes */
242 if ((ctx->rx_max <
243 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
244 (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) {
245 pr_debug("Using default maximum receive length=%d\n",
246 CDC_NCM_NTB_MAX_SIZE_RX);
247 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
248 }
249
250 /* verify maximum size of transmitted NTB in bytes */
251 if ((ctx->tx_max <
252 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
253 (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
254 pr_debug("Using default maximum transmit length=%d\n",
255 CDC_NCM_NTB_MAX_SIZE_TX);
256 ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
257 }
258
259 /*
260 * verify that the structure alignment is:
261 * - power of two
262 * - not greater than the maximum transmit length
263 * - not less than four bytes
264 */
265 val = ctx->tx_ndp_modulus;
266
267 if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
268 (val != ((-val) & val)) || (val >= ctx->tx_max)) {
269 pr_debug("Using default alignment: 4 bytes\n");
270 ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
271 }
272
273 /*
274 * verify that the payload alignment is:
275 * - power of two
276 * - not greater than the maximum transmit length
277 * - not less than four bytes
278 */
279 val = ctx->tx_modulus;
280
281 if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
282 (val != ((-val) & val)) || (val >= ctx->tx_max)) {
283 pr_debug("Using default transmit modulus: 4 bytes\n");
284 ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
285 }
286
287 /* verify the payload remainder */
288 if (ctx->tx_remainder >= ctx->tx_modulus) {
289 pr_debug("Using default transmit remainder: 0 bytes\n");
290 ctx->tx_remainder = 0;
291 }
292
293 /* adjust TX-remainder according to NCM specification. */
294 ctx->tx_remainder = ((ctx->tx_remainder - ETH_HLEN) &
295 (ctx->tx_modulus - 1));
296
297 /* additional configuration */
298
299 /* set CRC Mode */
300 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
301 req.bNotificationType = USB_CDC_SET_CRC_MODE;
302 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
303 req.wIndex = cpu_to_le16(iface_no);
304 req.wLength = 0;
305
306 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
307 if (err)
308 pr_debug("Setting CRC mode off failed\n");
309
310 /* set NTB format */
311 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
312 req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
313 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
314 req.wIndex = cpu_to_le16(iface_no);
315 req.wLength = 0;
316
317 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
318 if (err)
319 pr_debug("Setting NTB format to 16-bit failed\n");
320
321 /* set Max Datagram Size (MTU) */
322 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
323 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
324 req.wValue = 0;
325 req.wIndex = cpu_to_le16(iface_no);
326 req.wLength = cpu_to_le16(2);
327
328 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000);
329 if (err) {
330 pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n",
331 CDC_NCM_MIN_DATAGRAM_SIZE);
332 /* use default */
333 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
334 } else {
335 ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
336
337 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
338 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
339 else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
340 ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
341 }
342
343 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
344 ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN;
345
346 return 0;
347}
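
/*
 * Illustrative sketch, not from the original patch: cdc_ncm_setup() above
 * only accepts tx_modulus/tx_ndp_modulus values that are powers of two,
 * using the two's complement identity that (-val) & val isolates the lowest
 * set bit, so val == ((-val) & val) holds exactly when a single bit is set.
 * The helper below (hypothetical name, assuming the standard ALIGN() macro)
 * shows the per-datagram offset math the TX path applies with the validated
 * values.
 */
static inline u32 __maybe_unused
cdc_ncm_align_example(u32 offset, u32 modulus, u32 remainder)
{
	/* e.g. offset = 54, modulus = 4, remainder = 2 -> 56 + 2 = 58 */
	return ALIGN(offset, modulus) + remainder;
}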
348
349static void
350cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
351{
352 struct usb_host_endpoint *e;
353 u8 ep;
354
355 for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
356
357 e = intf->cur_altsetting->endpoint + ep;
358 switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
359 case USB_ENDPOINT_XFER_INT:
360 if (usb_endpoint_dir_in(&e->desc)) {
361 if (ctx->status_ep == NULL)
362 ctx->status_ep = e;
363 }
364 break;
365
366 case USB_ENDPOINT_XFER_BULK:
367 if (usb_endpoint_dir_in(&e->desc)) {
368 if (ctx->in_ep == NULL)
369 ctx->in_ep = e;
370 } else {
371 if (ctx->out_ep == NULL)
372 ctx->out_ep = e;
373 }
374 break;
375
376 default:
377 break;
378 }
379 }
380}
381
382static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
383{
384 if (ctx == NULL)
385 return;
386
387 del_timer_sync(&ctx->tx_timer);
388
389 if (ctx->data_claimed) {
390 usb_set_intfdata(ctx->data, NULL);
391 usb_driver_release_interface(driver_of(ctx->intf), ctx->data);
392 }
393
394 if (ctx->control_claimed) {
395 usb_set_intfdata(ctx->control, NULL);
396 usb_driver_release_interface(driver_of(ctx->intf),
397 ctx->control);
398 }
399
400 if (ctx->tx_rem_skb != NULL) {
401 dev_kfree_skb_any(ctx->tx_rem_skb);
402 ctx->tx_rem_skb = NULL;
403 }
404
405 if (ctx->tx_curr_skb != NULL) {
406 dev_kfree_skb_any(ctx->tx_curr_skb);
407 ctx->tx_curr_skb = NULL;
408 }
409
410 kfree(ctx);
411}
412
413static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
414{
415 struct cdc_ncm_ctx *ctx;
416 struct usb_driver *driver;
417 u8 *buf;
418 int len;
419 int temp;
420 u8 iface_no;
421
422 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
423 if (ctx == NULL)
424 goto error;
425
426 memset(ctx, 0, sizeof(*ctx));
427
428 init_timer(&ctx->tx_timer);
429 spin_lock_init(&ctx->mtx);
430 ctx->netdev = dev->net;
431
432 /* store ctx pointer in device data field */
433 dev->data[0] = (unsigned long)ctx;
434
435 /* get some pointers */
436 driver = driver_of(intf);
437 buf = intf->cur_altsetting->extra;
438 len = intf->cur_altsetting->extralen;
439
440 ctx->udev = dev->udev;
441 ctx->intf = intf;
442
443 /* parse through descriptors associated with control interface */
444 while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) {
445
446 if (buf[1] != USB_DT_CS_INTERFACE)
447 goto advance;
448
449 switch (buf[2]) {
450 case USB_CDC_UNION_TYPE:
451 if (buf[0] < sizeof(*(ctx->union_desc)))
452 break;
453
454 ctx->union_desc =
455 (const struct usb_cdc_union_desc *)buf;
456
457 ctx->control = usb_ifnum_to_if(dev->udev,
458 ctx->union_desc->bMasterInterface0);
459 ctx->data = usb_ifnum_to_if(dev->udev,
460 ctx->union_desc->bSlaveInterface0);
461 break;
462
463 case USB_CDC_ETHERNET_TYPE:
464 if (buf[0] < sizeof(*(ctx->ether_desc)))
465 break;
466
467 ctx->ether_desc =
468 (const struct usb_cdc_ether_desc *)buf;
469
470 dev->hard_mtu =
471 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
472
473 if (dev->hard_mtu <
474 (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN))
475 dev->hard_mtu =
476 CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN;
477
478 else if (dev->hard_mtu >
479 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
480 dev->hard_mtu =
481 CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
482 break;
483
484 case USB_CDC_NCM_TYPE:
485 if (buf[0] < sizeof(*(ctx->func_desc)))
486 break;
487
488 ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf;
489 break;
490
491 default:
492 break;
493 }
494advance:
495 /* advance to next descriptor */
496 temp = buf[0];
497 buf += temp;
498 len -= temp;
499 }
500
501 /* check if we got everything */
502 if ((ctx->control == NULL) || (ctx->data == NULL) ||
503 (ctx->ether_desc == NULL))
504 goto error;
505
506 /* claim interfaces, if any */
507 if (ctx->data != intf) {
508 temp = usb_driver_claim_interface(driver, ctx->data, dev);
509 if (temp)
510 goto error;
511 ctx->data_claimed = 1;
512 }
513
514 if (ctx->control != intf) {
515 temp = usb_driver_claim_interface(driver, ctx->control, dev);
516 if (temp)
517 goto error;
518 ctx->control_claimed = 1;
519 }
520
521 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
522
523 /* reset data interface */
524 temp = usb_set_interface(dev->udev, iface_no, 0);
525 if (temp)
526 goto error;
527
528 /* initialize data interface */
529 if (cdc_ncm_setup(ctx))
530 goto error;
531
532 /* configure data interface */
533 temp = usb_set_interface(dev->udev, iface_no, 1);
534 if (temp)
535 goto error;
536
537 cdc_ncm_find_endpoints(ctx, ctx->data);
538 cdc_ncm_find_endpoints(ctx, ctx->control);
539
540 if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
541 (ctx->status_ep == NULL))
542 goto error;
543
544 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
545
546 usb_set_intfdata(ctx->data, dev);
547 usb_set_intfdata(ctx->control, dev);
548 usb_set_intfdata(ctx->intf, dev);
549
550 temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
551 if (temp)
552 goto error;
553
554 dev_info(&dev->udev->dev, "MAC-Address: "
555 "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
556 dev->net->dev_addr[0], dev->net->dev_addr[1],
557 dev->net->dev_addr[2], dev->net->dev_addr[3],
558 dev->net->dev_addr[4], dev->net->dev_addr[5]);
559
560 dev->in = usb_rcvbulkpipe(dev->udev,
561 ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
562 dev->out = usb_sndbulkpipe(dev->udev,
563 ctx->out_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
564 dev->status = ctx->status_ep;
565 dev->rx_urb_size = ctx->rx_max;
566
567 /*
568	 * We should get an event when the network connection is "connected" or
569	 * "disconnected". Set the network connection to the "disconnected" state
570	 * (carrier OFF) during attach, so the IP network stack does not start
571	 * IPv6 negotiation or similar traffic until the device reports a connection.
572 */
573 netif_carrier_off(dev->net);
574 ctx->tx_speed = ctx->rx_speed = 0;
575 return 0;
576
577error:
578 cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
579 dev->data[0] = 0;
580 dev_info(&dev->udev->dev, "Descriptor failure\n");
581 return -ENODEV;
582}
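
/*
 * Illustrative sketch, not from the original patch: the descriptor loop in
 * cdc_ncm_bind() walks the class-specific descriptors appended to the
 * control interface, where buf[0] is bLength, buf[1] is bDescriptorType and
 * buf[2] is bDescriptorSubType. A stand-alone version of the same walk,
 * with a hypothetical callback for the subtype dispatch:
 */
static void __maybe_unused
cdc_ncm_walk_cs_descriptors(const u8 *buf, int len,
			    void (*handle_desc)(const u8 *desc))
{
	while (len > 0 && buf[0] > 2 && buf[0] <= len) {
		if (buf[1] == USB_DT_CS_INTERFACE)
			handle_desc(buf);	/* dispatch on buf[2] */
		len -= buf[0];
		buf += buf[0];
	}
}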
583
584static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
585{
586 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
587 struct usb_driver *driver;
588
589 if (ctx == NULL)
590 return; /* no setup */
591
592 driver = driver_of(intf);
593
594 usb_set_intfdata(ctx->data, NULL);
595 usb_set_intfdata(ctx->control, NULL);
596 usb_set_intfdata(ctx->intf, NULL);
597
598 /* release interfaces, if any */
599 if (ctx->data_claimed) {
600 usb_driver_release_interface(driver, ctx->data);
601 ctx->data_claimed = 0;
602 }
603
604 if (ctx->control_claimed) {
605 usb_driver_release_interface(driver, ctx->control);
606 ctx->control_claimed = 0;
607 }
608
609 cdc_ncm_free(ctx);
610}
611
612static void cdc_ncm_zero_fill(u8 *ptr, u32 first, u32 end, u32 max)
613{
614 if (first >= max)
615 return;
616 if (first >= end)
617 return;
618 if (end > max)
619 end = max;
620 memset(ptr + first, 0, end - first);
621}
622
623static struct sk_buff *
624cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
625{
626 struct sk_buff *skb_out;
627 u32 rem;
628 u32 offset;
629 u32 last_offset;
630 u16 n = 0;
631 u8 timeout = 0;
632
633 /* if there is a remaining skb, it gets priority */
634 if (skb != NULL)
635 swap(skb, ctx->tx_rem_skb);
636 else
637 timeout = 1;
638
639 /*
640 * +----------------+
641 * | skb_out |
642 * +----------------+
643 * ^ offset
644 * ^ last_offset
645 */
646
647 /* check if we are resuming an OUT skb */
648 if (ctx->tx_curr_skb != NULL) {
649 /* pop variables */
650 skb_out = ctx->tx_curr_skb;
651 offset = ctx->tx_curr_offset;
652 last_offset = ctx->tx_curr_last_offset;
653 n = ctx->tx_curr_frame_num;
654
655 } else {
656 /* reset variables */
657 skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
658 if (skb_out == NULL) {
659 if (skb != NULL) {
660 dev_kfree_skb_any(skb);
661 ctx->netdev->stats.tx_dropped++;
662 }
663 goto exit_no_skb;
664 }
665
666 /* make room for NTH and NDP */
667 offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
668 ctx->tx_ndp_modulus) +
669 sizeof(struct usb_cdc_ncm_ndp16) +
670 (ctx->tx_max_datagrams + 1) *
671 sizeof(struct usb_cdc_ncm_dpe16);
672
673 /* store last valid offset before alignment */
674 last_offset = offset;
675 /* align first Datagram offset correctly */
676 offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
677 /* zero buffer till the first IP datagram */
678 cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
679 n = 0;
680 ctx->tx_curr_frame_num = 0;
681 }
682
683 for (; n < ctx->tx_max_datagrams; n++) {
684 /* check if end of transmit buffer is reached */
685 if (offset >= ctx->tx_max)
686 break;
687
688		/* compute the remaining space in the transmit buffer */
689 rem = ctx->tx_max - offset;
690
691 if (skb == NULL) {
692 skb = ctx->tx_rem_skb;
693 ctx->tx_rem_skb = NULL;
694
695 /* check for end of skb */
696 if (skb == NULL)
697 break;
698 }
699
700 if (skb->len > rem) {
701 if (n == 0) {
702 /* won't fit, MTU problem? */
703 dev_kfree_skb_any(skb);
704 skb = NULL;
705 ctx->netdev->stats.tx_dropped++;
706 } else {
707 /* no room for skb - store for later */
708 if (ctx->tx_rem_skb != NULL) {
709 dev_kfree_skb_any(ctx->tx_rem_skb);
710 ctx->netdev->stats.tx_dropped++;
711 }
712 ctx->tx_rem_skb = skb;
713 skb = NULL;
714
715 /* loop one more time */
716 timeout = 1;
717 }
718 break;
719 }
720
721 memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len);
722
723 ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len);
724 ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);
725
726 /* update offset */
727 offset += skb->len;
728
729 /* store last valid offset before alignment */
730 last_offset = offset;
731
732 /* align offset correctly */
733 offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
734
735 /* zero padding */
736 cdc_ncm_zero_fill(skb_out->data, last_offset, offset,
737 ctx->tx_max);
738 dev_kfree_skb_any(skb);
739 skb = NULL;
740 }
741
742 /* free up any dangling skb */
743 if (skb != NULL) {
744 dev_kfree_skb_any(skb);
745 skb = NULL;
746 ctx->netdev->stats.tx_dropped++;
747 }
748
749 ctx->tx_curr_frame_num = n;
750
751 if (n == 0) {
752 /* wait for more frames */
753 /* push variables */
754 ctx->tx_curr_skb = skb_out;
755 ctx->tx_curr_offset = offset;
756 ctx->tx_curr_last_offset = last_offset;
757 goto exit_no_skb;
758
759 } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) {
760 /* wait for more frames */
761 /* push variables */
762 ctx->tx_curr_skb = skb_out;
763 ctx->tx_curr_offset = offset;
764 ctx->tx_curr_last_offset = last_offset;
765 /* set the pending count */
766 if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
767 ctx->tx_timer_pending = 2;
768 goto exit_no_skb;
769
770 } else {
771 /* frame goes out */
772 /* variables will be reset at next call */
773 }
774
775 /* check for overflow */
776 if (last_offset > ctx->tx_max)
777 last_offset = ctx->tx_max;
778
779 /* revert offset */
780 offset = last_offset;
781
782 /*
783	 * If the collected data size is less than or equal to CDC_NCM_MIN_TX_PKT
784	 * bytes, send the buffer as it is. With more data, it is more efficient
785	 * for a USB high-speed mobile device with a DMA engine to receive a full-
786	 * size NTB than to cancel the DMA transfer and receive a short packet.
787 */
788 if (offset > CDC_NCM_MIN_TX_PKT)
789 offset = ctx->tx_max;
790
791 /* final zero padding */
792 cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);
793
794 /* store last offset */
795 last_offset = offset;
796
797 if ((last_offset < ctx->tx_max) && ((last_offset %
798 le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
799 /* force short packet */
800 *(((u8 *)skb_out->data) + last_offset) = 0;
801 last_offset++;
802 }
803
804 /* zero the rest of the DPEs plus the last NULL entry */
805 for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
806 ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
807 ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
808 }
809
810 /* fill out 16-bit NTB header */
811 ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
812 ctx->tx_ncm.nth16.wHeaderLength =
813 cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
814 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
815 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
816 ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
817 ctx->tx_ndp_modulus);
818
819 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
820 ctx->tx_seq++;
821
822 /* fill out 16-bit NDP table */
823 ctx->tx_ncm.ndp16.dwSignature =
824 cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
825 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
826 sizeof(struct usb_cdc_ncm_dpe16));
827 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
828 ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */
829
830 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex,
831 &(ctx->tx_ncm.ndp16),
832 sizeof(ctx->tx_ncm.ndp16));
833
834 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex +
835 sizeof(ctx->tx_ncm.ndp16),
836 &(ctx->tx_ncm.dpe16),
837 (ctx->tx_curr_frame_num + 1) *
838 sizeof(struct usb_cdc_ncm_dpe16));
839
840 /* set frame length */
841 skb_put(skb_out, last_offset);
842
843 /* return skb */
844 ctx->tx_curr_skb = NULL;
845 return skb_out;
846
847exit_no_skb:
848 return NULL;
849}
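
/*
 * Illustrative sketch, not from the original patch: cdc_ncm_fill_tx_frame()
 * lays each NTB out as an NTH16 header, an aligned NDP16 index table with
 * one trailing NULL entry, and then the aligned datagrams. The helper below
 * (hypothetical name, assuming the same usb_cdc_ncm_* structures used above)
 * mirrors the header-room computation done when a new skb_out is started.
 */
static inline u32 __maybe_unused
cdc_ncm_ntb16_header_room(u16 max_datagrams, u16 ndp_align)
{
	return ALIGN(sizeof(struct usb_cdc_ncm_nth16), ndp_align) +
		sizeof(struct usb_cdc_ncm_ndp16) +
		(max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
}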
850
851static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
852{
853 /* start timer, if not already started */
854 if (timer_pending(&ctx->tx_timer) == 0) {
855 ctx->tx_timer.function = &cdc_ncm_tx_timeout;
856 ctx->tx_timer.data = (unsigned long)ctx;
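		/* round 1 ms up to whole jiffies, so the timer fires after at least one jiffy */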
857 ctx->tx_timer.expires = jiffies + ((HZ + 999) / 1000);
858 add_timer(&ctx->tx_timer);
859 }
860}
861
862static void cdc_ncm_tx_timeout(unsigned long arg)
863{
864 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)arg;
865 u8 restart;
866
867 spin_lock(&ctx->mtx);
868 if (ctx->tx_timer_pending != 0) {
869 ctx->tx_timer_pending--;
870 restart = 1;
871 } else
872 restart = 0;
873
874 spin_unlock(&ctx->mtx);
875
876 if (restart)
877 cdc_ncm_tx_timeout_start(ctx);
878 else if (ctx->netdev != NULL)
879 usbnet_start_xmit(NULL, ctx->netdev);
880}
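
/*
 * Note, not from the original patch: usbnet_start_xmit(NULL, netdev) above
 * re-enters cdc_ncm_tx_fixup() with skb == NULL, so cdc_ncm_fill_tx_frame()
 * takes its timeout path (timeout = 1) and flushes whatever datagrams have
 * been accumulated so far.
 */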
881
882static struct sk_buff *
883cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
884{
885 struct sk_buff *skb_out;
886 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
887 u8 need_timer = 0;
888
889 /*
890 * The Ethernet API we are using does not support transmitting
891 * multiple Ethernet frames in a single call. This driver will
892 * accumulate multiple Ethernet frames and send out a larger
893	 * USB frame when the USB buffer is full or when a one-jiffy
894	 * timeout expires.
895 */
896 if (ctx == NULL)
897 goto error;
898
899 spin_lock(&ctx->mtx);
900 skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
901 if (ctx->tx_curr_skb != NULL)
902 need_timer = 1;
903 spin_unlock(&ctx->mtx);
904
905 /* Start timer, if there is a remaining skb */
906 if (need_timer)
907 cdc_ncm_tx_timeout_start(ctx);
908
909 if (skb_out)
910 dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
911 return skb_out;
912
913error:
914 if (skb != NULL)
915 dev_kfree_skb_any(skb);
916
917 return NULL;
918}
919
920static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
921{
922 struct sk_buff *skb;
923 struct cdc_ncm_ctx *ctx;
924 int sumlen;
925 int actlen;
926 int temp;
927 int nframes;
928 int x;
929 int offset;
930
931 ctx = (struct cdc_ncm_ctx *)dev->data[0];
932 if (ctx == NULL)
933 goto error;
934
935 actlen = skb_in->len;
936 sumlen = CDC_NCM_NTB_MAX_SIZE_RX;
937
938 if (actlen < (sizeof(ctx->rx_ncm.nth16) + sizeof(ctx->rx_ncm.ndp16))) {
939 pr_debug("frame too short\n");
940 goto error;
941 }
942
943 memcpy(&(ctx->rx_ncm.nth16), ((u8 *)skb_in->data),
944 sizeof(ctx->rx_ncm.nth16));
945
946 if (le32_to_cpu(ctx->rx_ncm.nth16.dwSignature) !=
947 USB_CDC_NCM_NTH16_SIGN) {
948 pr_debug("invalid NTH16 signature <%u>\n",
949 le32_to_cpu(ctx->rx_ncm.nth16.dwSignature));
950 goto error;
951 }
952
953 temp = le16_to_cpu(ctx->rx_ncm.nth16.wBlockLength);
954 if (temp > sumlen) {
955 pr_debug("unsupported NTB block length %u/%u\n", temp, sumlen);
956 goto error;
957 }
958
959 temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex);
960 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
961 pr_debug("invalid DPT16 index\n");
962 goto error;
963 }
964
965 memcpy(&(ctx->rx_ncm.ndp16), ((u8 *)skb_in->data) + temp,
966 sizeof(ctx->rx_ncm.ndp16));
967
968 if (le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature) !=
969 USB_CDC_NCM_NDP16_NOCRC_SIGN) {
970 pr_debug("invalid DPT16 signature <%u>\n",
971 le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature));
972 goto error;
973 }
974
975 if (le16_to_cpu(ctx->rx_ncm.ndp16.wLength) <
976 USB_CDC_NCM_NDP16_LENGTH_MIN) {
977 pr_debug("invalid DPT16 length <%u>\n",
978				le16_to_cpu(ctx->rx_ncm.ndp16.wLength));
979 goto error;
980 }
981
982 nframes = ((le16_to_cpu(ctx->rx_ncm.ndp16.wLength) -
983 sizeof(struct usb_cdc_ncm_ndp16)) /
984 sizeof(struct usb_cdc_ncm_dpe16));
985 nframes--; /* we process NDP entries except for the last one */
986
987 pr_debug("nframes = %u\n", nframes);
988
989 temp += sizeof(ctx->rx_ncm.ndp16);
990
991 if ((temp + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) > actlen) {
992 pr_debug("Invalid nframes = %d\n", nframes);
993 goto error;
994 }
995
996 if (nframes > CDC_NCM_DPT_DATAGRAMS_MAX) {
997 pr_debug("Truncating number of frames from %u to %u\n",
998 nframes, CDC_NCM_DPT_DATAGRAMS_MAX);
999 nframes = CDC_NCM_DPT_DATAGRAMS_MAX;
1000 }
1001
1002 memcpy(&(ctx->rx_ncm.dpe16), ((u8 *)skb_in->data) + temp,
1003 nframes * (sizeof(struct usb_cdc_ncm_dpe16)));
1004
1005 for (x = 0; x < nframes; x++) {
1006 offset = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramIndex);
1007 temp = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramLength);
1008
1009 /*
1010 * CDC NCM ch. 3.7
1011 * All entries after first NULL entry are to be ignored
1012 */
1013 if ((offset == 0) || (temp == 0)) {
1014 if (!x)
1015 goto error; /* empty NTB */
1016 break;
1017 }
1018
1019 /* sanity checking */
1020 if (((offset + temp) > actlen) ||
1021 (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
1022			pr_debug("invalid frame detected (ignored) "
1023 "offset[%u]=%u, length=%u, skb=%p\n",
1024 x, offset, temp, skb);
1025 if (!x)
1026 goto error;
1027 break;
1028
1029 } else {
1030			skb = skb_clone(skb_in, GFP_ATOMIC);
			if (skb == NULL)
				goto error;
1031			skb->len = temp;
1032 skb->data = ((u8 *)skb_in->data) + offset;
1033 skb_set_tail_pointer(skb, temp);
1034 usbnet_skb_return(dev, skb);
1035 }
1036 }
1037 return 1;
1038error:
1039 return 0;
1040}
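
/*
 * Illustrative sketch, not from the original patch: the RX path above only
 * accepts an NTB after checking the NTH16 signature and bounds. A
 * stand-alone version of that validation, with a hypothetical helper name:
 */
static int __maybe_unused
cdc_ncm_nth16_ok(const struct usb_cdc_ncm_nth16 *nth16, int actlen, int maxlen)
{
	if (le32_to_cpu(nth16->dwSignature) != USB_CDC_NCM_NTH16_SIGN)
		return 0;	/* wrong signature */
	if (le16_to_cpu(nth16->wBlockLength) > maxlen)
		return 0;	/* block longer than the negotiated NTB size */
	if (le16_to_cpu(nth16->wFpIndex) + sizeof(struct usb_cdc_ncm_ndp16) >
	    actlen)
		return 0;	/* NDP16 would lie outside the received data */
	return 1;
}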
1041
1042static void
1043cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
1044 struct connection_speed_change *data)
1045{
1046 uint32_t rx_speed = le32_to_cpu(data->USBitRate);
1047 uint32_t tx_speed = le32_to_cpu(data->DSBitRate);
1048
1049 /*
1050 * Currently the USB-NET API does not support reporting the actual
1051	 * device speed, so we print it to the kernel log instead.
1052 */
1053 if ((tx_speed != ctx->tx_speed) || (rx_speed != ctx->rx_speed)) {
1054 ctx->tx_speed = tx_speed;
1055 ctx->rx_speed = rx_speed;
1056
1057 if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
1058 printk(KERN_INFO KBUILD_MODNAME
1059 ": %s: %u mbit/s downlink "
1060 "%u mbit/s uplink\n",
1061 ctx->netdev->name,
1062 (unsigned int)(rx_speed / 1000000U),
1063 (unsigned int)(tx_speed / 1000000U));
1064 } else {
1065 printk(KERN_INFO KBUILD_MODNAME
1066 ": %s: %u kbit/s downlink "
1067 "%u kbit/s uplink\n",
1068 ctx->netdev->name,
1069 (unsigned int)(rx_speed / 1000U),
1070 (unsigned int)(tx_speed / 1000U));
1071 }
1072 }
1073}
1074
1075static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1076{
1077 struct cdc_ncm_ctx *ctx;
1078 struct usb_cdc_notification *event;
1079
1080 ctx = (struct cdc_ncm_ctx *)dev->data[0];
1081
1082 if (urb->actual_length < sizeof(*event))
1083 return;
1084
1085 /* test for split data in 8-byte chunks */
1086 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
1087 cdc_ncm_speed_change(ctx,
1088 (struct connection_speed_change *)urb->transfer_buffer);
1089 return;
1090 }
1091
1092 event = urb->transfer_buffer;
1093
1094 switch (event->bNotificationType) {
1095 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
1096 /*
1097 * According to the CDC NCM specification ch.7.1
1098 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
1099 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
1100 */
1101 ctx->connected = event->wValue;
1102
1103 printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
1104 " %sconnected\n",
1105 ctx->netdev->name, ctx->connected ? "" : "dis");
1106
1107 if (ctx->connected)
1108 netif_carrier_on(dev->net);
1109 else {
1110 netif_carrier_off(dev->net);
1111 ctx->tx_speed = ctx->rx_speed = 0;
1112 }
1113 break;
1114
1115 case USB_CDC_NOTIFY_SPEED_CHANGE:
1116 if (urb->actual_length <
1117 (sizeof(*event) + sizeof(struct connection_speed_change)))
1118 set_bit(EVENT_STS_SPLIT, &dev->flags);
1119 else
1120 cdc_ncm_speed_change(ctx,
1121 (struct connection_speed_change *) &event[1]);
1122 break;
1123
1124 default:
1125 dev_err(&dev->udev->dev, "NCM: unexpected "
1126 "notification 0x%02x!\n", event->bNotificationType);
1127 break;
1128 }
1129}
1130
1131static int cdc_ncm_check_connect(struct usbnet *dev)
1132{
1133 struct cdc_ncm_ctx *ctx;
1134
1135 ctx = (struct cdc_ncm_ctx *)dev->data[0];
1136 if (ctx == NULL)
1137 return 1; /* disconnected */
1138
1139 return !ctx->connected;
1140}
1141
1142static int
1143cdc_ncm_probe(struct usb_interface *udev, const struct usb_device_id *prod)
1144{
1145 return usbnet_probe(udev, prod);
1146}
1147
1148static void cdc_ncm_disconnect(struct usb_interface *intf)
1149{
1150 struct usbnet *dev = usb_get_intfdata(intf);
1151
1152 if (dev == NULL)
1153 return; /* already disconnected */
1154
1155 usbnet_disconnect(intf);
1156}
1157
1158static int cdc_ncm_manage_power(struct usbnet *dev, int status)
1159{
1160 dev->intf->needs_remote_wakeup = status;
1161 return 0;
1162}
1163
1164static const struct driver_info cdc_ncm_info = {
1165 .description = "CDC NCM",
1166 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET,
1167 .bind = cdc_ncm_bind,
1168 .unbind = cdc_ncm_unbind,
1169 .check_connect = cdc_ncm_check_connect,
1170 .manage_power = cdc_ncm_manage_power,
1171 .status = cdc_ncm_status,
1172 .rx_fixup = cdc_ncm_rx_fixup,
1173 .tx_fixup = cdc_ncm_tx_fixup,
1174};
1175
1176static struct usb_driver cdc_ncm_driver = {
1177 .name = "cdc_ncm",
1178 .id_table = cdc_devs,
1179 .probe = cdc_ncm_probe,
1180 .disconnect = cdc_ncm_disconnect,
1181 .suspend = usbnet_suspend,
1182 .resume = usbnet_resume,
1183 .supports_autosuspend = 1,
1184};
1185
1186static struct ethtool_ops cdc_ncm_ethtool_ops = {
1187 .get_drvinfo = cdc_ncm_get_drvinfo,
1188 .get_link = usbnet_get_link,
1189 .get_msglevel = usbnet_get_msglevel,
1190 .set_msglevel = usbnet_set_msglevel,
1191 .get_settings = usbnet_get_settings,
1192 .set_settings = usbnet_set_settings,
1193 .nway_reset = usbnet_nway_reset,
1194};
1195
1196static int __init cdc_ncm_init(void)
1197{
1198 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION "\n");
1199 return usb_register(&cdc_ncm_driver);
1200}
1201
1202module_init(cdc_ncm_init);
1203
1204static void __exit cdc_ncm_exit(void)
1205{
1206 usb_deregister(&cdc_ncm_driver);
1207}
1208
1209module_exit(cdc_ncm_exit);
1210
1211MODULE_AUTHOR("Hans Petter Selasky");
1212MODULE_DESCRIPTION("USB CDC NCM host driver");
1213MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 62e9e8dc819..93c6b5f62ac 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1745,7 +1745,6 @@ static int hso_serial_ioctl(struct tty_struct *tty, struct file *file,
 			    unsigned int cmd, unsigned long arg)
 {
 	struct hso_serial *serial = get_serial_by_tty(tty);
-	void __user *uarg = (void __user *)arg;
 	int ret = 0;
 	D4("IOCTL cmd: %d, arg: %ld", cmd, arg);
 
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index b2bcf99e6f0..7d42f9a2c06 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -363,7 +363,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
 
 	/* Paranoid */
 	if (skb->len > IPHETH_BUF_SIZE) {
-		WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+		WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
 		dev->net->stats.tx_dropped++;
 		dev_kfree_skb_irq(skb);
 		return NETDEV_TX_OK;
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6710f09346d..ef3667690b1 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -359,7 +359,7 @@ fail:
 
 static int mdio_read(struct net_device *dev, int phy_id, int loc)
 {
-	pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+	pegasus_t *pegasus = netdev_priv(dev);
 	u16 res;
 
 	read_mii_word(pegasus, phy_id, loc, &res);
@@ -397,7 +397,7 @@ fail:
 
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 {
-	pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+	pegasus_t *pegasus = netdev_priv(dev);
 
 	write_mii_word(pegasus, phy_id, loc, val);
 }
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c04d49e31f8..cff74b81a7d 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -391,14 +391,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
 		goto error;
 	// else network stack removes extra byte if we forced a short packet
 
-	if (skb->len)
-		usbnet_skb_return (dev, skb);
-	else {
-		netif_dbg(dev, rx_err, dev->net, "drop\n");
-error:
-		dev->net->stats.rx_errors++;
-		skb_queue_tail (&dev->done, skb);
-	}
+	if (skb->len) {
+		/* all data was already cloned from skb inside the driver */
+		if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+			dev_kfree_skb_any(skb);
+		else
+			usbnet_skb_return(dev, skb);
+		return;
+	}
+
+	netif_dbg(dev, rx_err, dev->net, "drop\n");
+error:
+	dev->net->stats.rx_errors++;
+	skb_queue_tail(&dev->done, skb);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -971,7 +976,8 @@ static void tx_complete (struct urb *urb)
 	struct usbnet		*dev = entry->dev;
 
 	if (urb->status == 0) {
-		dev->net->stats.tx_packets++;
+		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
+			dev->net->stats.tx_packets++;
 		dev->net->stats.tx_bytes += entry->length;
 	} else {
 		dev->net->stats.tx_errors++;
@@ -1044,8 +1050,13 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 	if (info->tx_fixup) {
 		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
 		if (!skb) {
-			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
-			goto drop;
+			if (netif_msg_tx_err(dev)) {
+				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+				goto drop;
+			} else {
+				/* cdc_ncm collected packet; waits for more */
+				goto not_drop;
+			}
 		}
 	}
 	length = skb->len;
@@ -1067,13 +1078,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 	/* don't assume the hardware handles USB_ZERO_PACKET
 	 * NOTE: strictly conforming cdc-ether devices should expect
 	 * the ZLP here, but ignore the one-byte packet.
+	 * NOTE2: CDC NCM specification is different from CDC ECM when
+	 * handling ZLP/short packets, so cdc_ncm driver will make short
+	 * packet itself if needed.
 	 */
 	if (length % dev->maxpacket == 0) {
 		if (!(info->flags & FLAG_SEND_ZLP)) {
-			urb->transfer_buffer_length++;
-			if (skb_tailroom(skb)) {
-				skb->data[skb->len] = 0;
-				__skb_put(skb, 1);
+			if (!(info->flags & FLAG_MULTI_PACKET)) {
+				urb->transfer_buffer_length++;
+				if (skb_tailroom(skb)) {
+					skb->data[skb->len] = 0;
+					__skb_put(skb, 1);
+				}
 			}
 		} else
 			urb->transfer_flags |= URB_ZERO_PACKET;
@@ -1122,6 +1138,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
 drop:
 	dev->net->stats.tx_dropped++;
+not_drop:
 	if (skb)
 		dev_kfree_skb_any (skb);
 	usb_free_urb (urb);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 4930f9dbc49..5e7f069eab5 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
30*/ 30*/
31 31
32#define DRV_NAME "via-rhine" 32#define DRV_NAME "via-rhine"
33#define DRV_VERSION "1.4.3" 33#define DRV_VERSION "1.5.0"
34#define DRV_RELDATE "2007-03-06" 34#define DRV_RELDATE "2010-10-09"
35 35
36 36
37/* A few user-configurable values. 37/* A few user-configurable values.
@@ -100,6 +100,7 @@ static const int multicast_filter_limit = 32;
100#include <linux/mii.h> 100#include <linux/mii.h>
101#include <linux/ethtool.h> 101#include <linux/ethtool.h>
102#include <linux/crc32.h> 102#include <linux/crc32.h>
103#include <linux/if_vlan.h>
103#include <linux/bitops.h> 104#include <linux/bitops.h>
104#include <linux/workqueue.h> 105#include <linux/workqueue.h>
105#include <asm/processor.h> /* Processor type for cache alignment. */ 106#include <asm/processor.h> /* Processor type for cache alignment. */
@@ -133,6 +134,9 @@ MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
133MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); 134MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
134MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); 135MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
135 136
137#define MCAM_SIZE 32
138#define VCAM_SIZE 32
139
136/* 140/*
137 Theory of Operation 141 Theory of Operation
138 142
@@ -279,15 +283,16 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
279/* Offsets to the device registers. */ 283/* Offsets to the device registers. */
280enum register_offsets { 284enum register_offsets {
281 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08, 285 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
282 ChipCmd1=0x09, 286 ChipCmd1=0x09, TQWake=0x0A,
283 IntrStatus=0x0C, IntrEnable=0x0E, 287 IntrStatus=0x0C, IntrEnable=0x0E,
284 MulticastFilter0=0x10, MulticastFilter1=0x14, 288 MulticastFilter0=0x10, MulticastFilter1=0x14,
285 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54, 289 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
286 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, 290 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
287 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74, 291 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
288 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B, 292 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
289 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81, 293 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
290 StickyHW=0x83, IntrStatus2=0x84, 294 StickyHW=0x83, IntrStatus2=0x84,
295 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
291 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4, 296 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
292 WOLcrClr1=0xA6, WOLcgClr=0xA7, 297 WOLcrClr1=0xA6, WOLcgClr=0xA7,
293 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD, 298 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
@@ -299,6 +304,40 @@ enum backoff_bits {
299 BackCaptureEffect=0x04, BackRandom=0x08 304 BackCaptureEffect=0x04, BackRandom=0x08
300}; 305};
301 306
307/* Bits in the TxConfig (TCR) register */
308enum tcr_bits {
309 TCR_PQEN=0x01,
310 TCR_LB0=0x02, /* loopback[0] */
311 TCR_LB1=0x04, /* loopback[1] */
312 TCR_OFSET=0x08,
313 TCR_RTGOPT=0x10,
314 TCR_RTFT0=0x20,
315 TCR_RTFT1=0x40,
316 TCR_RTSF=0x80,
317};
318
319/* Bits in the CamCon (CAMC) register */
320enum camcon_bits {
321 CAMC_CAMEN=0x01,
322 CAMC_VCAMSL=0x02,
323 CAMC_CAMWR=0x04,
324 CAMC_CAMRD=0x08,
325};
326
327/* Bits in the PCIBusConfig1 (BCR1) register */
328enum bcr1_bits {
329 BCR1_POT0=0x01,
330 BCR1_POT1=0x02,
331 BCR1_POT2=0x04,
332 BCR1_CTFT0=0x08,
333 BCR1_CTFT1=0x10,
334 BCR1_CTSF=0x20,
335 BCR1_TXQNOBK=0x40, /* for VT6105 */
336 BCR1_VIDFR=0x80, /* for VT6105 */
337 BCR1_MED0=0x40, /* for VT6102 */
338 BCR1_MED1=0x80, /* for VT6102 */
339};
340
302#ifdef USE_MMIO 341#ifdef USE_MMIO
303/* Registers we check that mmio and reg are the same. */ 342/* Registers we check that mmio and reg are the same. */
304static const int mmio_verify_registers[] = { 343static const int mmio_verify_registers[] = {
@@ -356,6 +395,11 @@ enum desc_status_bits {
356 DescOwn=0x80000000 395 DescOwn=0x80000000
357}; 396};
358 397
398/* Bits in *_desc.*_length */
399enum desc_length_bits {
400 DescTag=0x00010000
401};
402
359/* Bits in ChipCmd. */ 403/* Bits in ChipCmd. */
360enum chip_cmd_bits { 404enum chip_cmd_bits {
361 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08, 405 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
@@ -365,6 +409,9 @@ enum chip_cmd_bits {
365}; 409};
366 410
367struct rhine_private { 411struct rhine_private {
412 /* Bit mask for configured VLAN ids */
413 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
414
368 /* Descriptor rings */ 415 /* Descriptor rings */
369 struct rx_desc *rx_ring; 416 struct rx_desc *rx_ring;
370 struct tx_desc *tx_ring; 417 struct tx_desc *tx_ring;
@@ -405,6 +452,23 @@ struct rhine_private {
405 void __iomem *base; 452 void __iomem *base;
406}; 453};
407 454
455#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
456#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
457#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
458
459#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
460#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
461#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
462
463#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
464#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
465#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
466
467#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
468#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
469#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
470
471
408static int mdio_read(struct net_device *dev, int phy_id, int location); 472static int mdio_read(struct net_device *dev, int phy_id, int location);
409static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 473static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
410static int rhine_open(struct net_device *dev); 474static int rhine_open(struct net_device *dev);
@@ -422,6 +486,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
422static const struct ethtool_ops netdev_ethtool_ops; 486static const struct ethtool_ops netdev_ethtool_ops;
423static int rhine_close(struct net_device *dev); 487static int rhine_close(struct net_device *dev);
424static void rhine_shutdown (struct pci_dev *pdev); 488static void rhine_shutdown (struct pci_dev *pdev);
489static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
490static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
491static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
492static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
493static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
494static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
495static void rhine_init_cam_filter(struct net_device *dev);
496static void rhine_update_vcam(struct net_device *dev);
425 497
426#define RHINE_WAIT_FOR(condition) do { \ 498#define RHINE_WAIT_FOR(condition) do { \
427 int i=1024; \ 499 int i=1024; \
@@ -629,6 +701,8 @@ static const struct net_device_ops rhine_netdev_ops = {
629 .ndo_set_mac_address = eth_mac_addr, 701 .ndo_set_mac_address = eth_mac_addr,
630 .ndo_do_ioctl = netdev_ioctl, 702 .ndo_do_ioctl = netdev_ioctl,
631 .ndo_tx_timeout = rhine_tx_timeout, 703 .ndo_tx_timeout = rhine_tx_timeout,
704 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
705 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
632#ifdef CONFIG_NET_POLL_CONTROLLER 706#ifdef CONFIG_NET_POLL_CONTROLLER
633 .ndo_poll_controller = rhine_poll, 707 .ndo_poll_controller = rhine_poll,
634#endif 708#endif
@@ -795,6 +869,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
795 if (rp->quirks & rqRhineI) 869 if (rp->quirks & rqRhineI)
796 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; 870 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
797 871
872 if (pdev->revision >= VT6105M)
873 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
874 NETIF_F_HW_VLAN_FILTER;
875
798 /* dev->name not defined before register_netdev()! */ 876 /* dev->name not defined before register_netdev()! */
799 rc = register_netdev(dev); 877 rc = register_netdev(dev);
800 if (rc) 878 if (rc)
@@ -1040,6 +1118,167 @@ static void rhine_set_carrier(struct mii_if_info *mii)
1040 netif_carrier_ok(mii->dev)); 1118 netif_carrier_ok(mii->dev));
1041} 1119}
1042 1120
1121/**
1122 * rhine_set_cam - set CAM multicast filters
1123 * @ioaddr: register block of this Rhine
1124 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1125 * @addr: multicast address (6 bytes)
1126 *
1127 * Load addresses into multicast filters.
1128 */
1129static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1130{
1131 int i;
1132
1133 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1134 wmb();
1135
1136 /* Paranoid -- idx out of range should never happen */
1137 idx &= (MCAM_SIZE - 1);
1138
1139 iowrite8((u8) idx, ioaddr + CamAddr);
1140
1141 for (i = 0; i < 6; i++, addr++)
1142 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1143 udelay(10);
1144 wmb();
1145
1146 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1147 udelay(10);
1148
1149 iowrite8(0, ioaddr + CamCon);
1150}
1151
1152/**
1153 * rhine_set_vlan_cam - set CAM VLAN filters
1154 * @ioaddr: register block of this Rhine
1155 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1156 * @addr: VLAN ID (2 bytes)
1157 *
1158 * Load addresses into VLAN filters.
1159 */
1160static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1161{
1162 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1163 wmb();
1164
1165 /* Paranoid -- idx out of range should never happen */
1166 idx &= (VCAM_SIZE - 1);
1167
1168 iowrite8((u8) idx, ioaddr + CamAddr);
1169
1170 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1171 udelay(10);
1172 wmb();
1173
1174 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1175 udelay(10);
1176
1177 iowrite8(0, ioaddr + CamCon);
1178}
1179
1180/**
1181 * rhine_set_cam_mask - set multicast CAM mask
1182 * @ioaddr: register block of this Rhine
1183 * @mask: multicast CAM mask
1184 *
1185 * Mask sets multicast filters active/inactive.
1186 */
1187static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1188{
1189 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1190 wmb();
1191
1192 /* write mask */
1193 iowrite32(mask, ioaddr + CamMask);
1194
1195 /* disable CAMEN */
1196 iowrite8(0, ioaddr + CamCon);
1197}
1198
1199/**
1200 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1201 * @ioaddr: register block of this Rhine
1202 * @mask: VLAN CAM mask
1203 *
1204 * Mask sets VLAN filters active/inactive.
1205 */
1206static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1207{
1208 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1209 wmb();
1210
1211 /* write mask */
1212 iowrite32(mask, ioaddr + CamMask);
1213
1214 /* disable CAMEN */
1215 iowrite8(0, ioaddr + CamCon);
1216}
1217
1218/**
1219 * rhine_init_cam_filter - initialize CAM filters
1220 * @dev: network device
1221 *
1222 * Initialize (disable) hardware VLAN and multicast support on this
1223 * Rhine.
1224 */
1225static void rhine_init_cam_filter(struct net_device *dev)
1226{
1227 struct rhine_private *rp = netdev_priv(dev);
1228 void __iomem *ioaddr = rp->base;
1229
1230 /* Disable all CAMs */
1231 rhine_set_vlan_cam_mask(ioaddr, 0);
1232 rhine_set_cam_mask(ioaddr, 0);
1233
1234 /* disable hardware VLAN support */
1235 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1236 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1237}
1238
1239/**
1240 * rhine_update_vcam - update VLAN CAM filters
1241 * @rp: rhine_private data of this Rhine
1242 *
1243 * Update VLAN CAM filters to match configuration change.
1244 */
1245static void rhine_update_vcam(struct net_device *dev)
1246{
1247 struct rhine_private *rp = netdev_priv(dev);
1248 void __iomem *ioaddr = rp->base;
1249 u16 vid;
1250 u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1251 unsigned int i = 0;
1252
1253 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1254 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1255 vCAMmask |= 1 << i;
1256 if (++i >= VCAM_SIZE)
1257 break;
1258 }
1259 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1260}
1261
1262static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1263{
1264 struct rhine_private *rp = netdev_priv(dev);
1265
1266 spin_lock_irq(&rp->lock);
1267 set_bit(vid, rp->active_vlans);
1268 rhine_update_vcam(dev);
1269 spin_unlock_irq(&rp->lock);
1270}
1271
1272static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1273{
1274 struct rhine_private *rp = netdev_priv(dev);
1275
1276 spin_lock_irq(&rp->lock);
1277 clear_bit(vid, rp->active_vlans);
1278 rhine_update_vcam(dev);
1279 spin_unlock_irq(&rp->lock);
1280}
1281
1043static void init_registers(struct net_device *dev) 1282static void init_registers(struct net_device *dev)
1044{ 1283{
1045 struct rhine_private *rp = netdev_priv(dev); 1284 struct rhine_private *rp = netdev_priv(dev);
@@ -1061,6 +1300,9 @@ static void init_registers(struct net_device *dev)
1061 1300
1062 rhine_set_rx_mode(dev); 1301 rhine_set_rx_mode(dev);
1063 1302
1303 if (rp->pdev->revision >= VT6105M)
1304 rhine_init_cam_filter(dev);
1305
1064 napi_enable(&rp->napi); 1306 napi_enable(&rp->napi);
1065 1307
1066 /* Enable interrupts by setting the interrupt mask. */ 1308 /* Enable interrupts by setting the interrupt mask. */
@@ -1276,16 +1518,28 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1276 rp->tx_ring[entry].desc_length = 1518 rp->tx_ring[entry].desc_length =
1277 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1519 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1278 1520
1521 if (unlikely(vlan_tx_tag_present(skb))) {
1522 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1523 /* request tagging */
1524 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1525 }
1526 else
1527 rp->tx_ring[entry].tx_status = 0;
1528
1279 /* lock eth irq */ 1529 /* lock eth irq */
1280 spin_lock_irqsave(&rp->lock, flags); 1530 spin_lock_irqsave(&rp->lock, flags);
1281 wmb(); 1531 wmb();
1282 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); 1532 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1283 wmb(); 1533 wmb();
1284 1534
1285 rp->cur_tx++; 1535 rp->cur_tx++;
1286 1536
1287 /* Non-x86 Todo: explicitly flush cache lines here. */ 1537 /* Non-x86 Todo: explicitly flush cache lines here. */
1288 1538
1539 if (vlan_tx_tag_present(skb))
1540 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1541 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1542
1289 /* Wake the potentially-idle transmit channel */ 1543 /* Wake the potentially-idle transmit channel */
1290 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, 1544 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1291 ioaddr + ChipCmd1); 1545 ioaddr + ChipCmd1);
@@ -1437,6 +1691,21 @@ static void rhine_tx(struct net_device *dev)
1437 spin_unlock(&rp->lock); 1691 spin_unlock(&rp->lock);
1438} 1692}
1439 1693
1694/**
1695 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1696 * @skb: pointer to sk_buff
1697 * @data_size: used data area of the buffer including CRC
1698 *
1699 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1700 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1701 * aligned following the CRC.
1702 */
1703static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1704{
1705 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1706 return ntohs(*(u16 *)trailer);
1707}
1708
1440/* Process up to limit frames from receive ring */ 1709/* Process up to limit frames from receive ring */
1441static int rhine_rx(struct net_device *dev, int limit) 1710static int rhine_rx(struct net_device *dev, int limit)
1442{ 1711{
@@ -1454,6 +1723,7 @@ static int rhine_rx(struct net_device *dev, int limit)
1454 for (count = 0; count < limit; ++count) { 1723 for (count = 0; count < limit; ++count) {
1455 struct rx_desc *desc = rp->rx_head_desc; 1724 struct rx_desc *desc = rp->rx_head_desc;
1456 u32 desc_status = le32_to_cpu(desc->rx_status); 1725 u32 desc_status = le32_to_cpu(desc->rx_status);
1726 u32 desc_length = le32_to_cpu(desc->desc_length);
1457 int data_size = desc_status >> 16; 1727 int data_size = desc_status >> 16;
1458 1728
1459 if (desc_status & DescOwn) 1729 if (desc_status & DescOwn)
@@ -1498,6 +1768,7 @@ static int rhine_rx(struct net_device *dev, int limit)
1498 struct sk_buff *skb = NULL; 1768 struct sk_buff *skb = NULL;
1499 /* Length should omit the CRC */ 1769 /* Length should omit the CRC */
1500 int pkt_len = data_size - 4; 1770 int pkt_len = data_size - 4;
1771 u16 vlan_tci = 0;
1501 1772
1502 /* Check if the packet is long enough to accept without 1773 /* Check if the packet is long enough to accept without
1503 copying to a minimally-sized skbuff. */ 1774 copying to a minimally-sized skbuff. */
@@ -1532,7 +1803,14 @@ static int rhine_rx(struct net_device *dev, int limit)
1532 rp->rx_buf_sz, 1803 rp->rx_buf_sz,
1533 PCI_DMA_FROMDEVICE); 1804 PCI_DMA_FROMDEVICE);
1534 } 1805 }
1806
1807 if (unlikely(desc_length & DescTag))
1808 vlan_tci = rhine_get_vlan_tci(skb, data_size);
1809
1535 skb->protocol = eth_type_trans(skb, dev); 1810 skb->protocol = eth_type_trans(skb, dev);
1811
1812 if (unlikely(desc_length & DescTag))
1813 __vlan_hwaccel_put_tag(skb, vlan_tci);
1536 netif_receive_skb(skb); 1814 netif_receive_skb(skb);
1537 dev->stats.rx_bytes += pkt_len; 1815 dev->stats.rx_bytes += pkt_len;
1538 dev->stats.rx_packets++; 1816 dev->stats.rx_packets++;
@@ -1596,6 +1874,11 @@ static void rhine_restart_tx(struct net_device *dev) {
1596 1874
1597 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn, 1875 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1598 ioaddr + ChipCmd); 1876 ioaddr + ChipCmd);
1877
1878 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1879 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1880 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1881
1599 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, 1882 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1600 ioaddr + ChipCmd1); 1883 ioaddr + ChipCmd1);
1601 IOSYNC; 1884 IOSYNC;
@@ -1631,7 +1914,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
1631 } 1914 }
1632 if (intr_status & IntrTxUnderrun) { 1915 if (intr_status & IntrTxUnderrun) {
1633 if (rp->tx_thresh < 0xE0) 1916 if (rp->tx_thresh < 0xE0)
1634 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig); 1917 BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
1635 if (debug > 1) 1918 if (debug > 1)
1636 printk(KERN_INFO "%s: Transmitter underrun, Tx " 1919 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1637 "threshold now %2.2x.\n", 1920 "threshold now %2.2x.\n",
@@ -1646,7 +1929,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
1646 (intr_status & (IntrTxAborted | 1929 (intr_status & (IntrTxAborted |
1647 IntrTxUnderrun | IntrTxDescRace)) == 0) { 1930 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1648 if (rp->tx_thresh < 0xE0) { 1931 if (rp->tx_thresh < 0xE0) {
1649 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig); 1932 BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
1650 } 1933 }
1651 if (debug > 1) 1934 if (debug > 1)
1652 printk(KERN_INFO "%s: Unspecified error. Tx " 1935 printk(KERN_INFO "%s: Unspecified error. Tx "
@@ -1688,7 +1971,8 @@ static void rhine_set_rx_mode(struct net_device *dev)
1688 struct rhine_private *rp = netdev_priv(dev); 1971 struct rhine_private *rp = netdev_priv(dev);
1689 void __iomem *ioaddr = rp->base; 1972 void __iomem *ioaddr = rp->base;
1690 u32 mc_filter[2]; /* Multicast hash filter */ 1973 u32 mc_filter[2]; /* Multicast hash filter */
1691 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */ 1974 u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */
1975 struct netdev_hw_addr *ha;
1692 1976
1693 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1977 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1694 rx_mode = 0x1C; 1978 rx_mode = 0x1C;
@@ -1699,10 +1983,18 @@ static void rhine_set_rx_mode(struct net_device *dev)
1699 /* Too many to match, or accept all multicasts. */ 1983 /* Too many to match, or accept all multicasts. */
1700 iowrite32(0xffffffff, ioaddr + MulticastFilter0); 1984 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1701 iowrite32(0xffffffff, ioaddr + MulticastFilter1); 1985 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1702 rx_mode = 0x0C; 1986 } else if (rp->pdev->revision >= VT6105M) {
1987 int i = 0;
1988 u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
1989 netdev_for_each_mc_addr(ha, dev) {
1990 if (i == MCAM_SIZE)
1991 break;
1992 rhine_set_cam(ioaddr, i, ha->addr);
1993 mCAMmask |= 1 << i;
1994 i++;
1995 }
1996 rhine_set_cam_mask(ioaddr, mCAMmask);
1703 } else { 1997 } else {
1704 struct netdev_hw_addr *ha;
1705
1706 memset(mc_filter, 0, sizeof(mc_filter)); 1998 memset(mc_filter, 0, sizeof(mc_filter));
1707 netdev_for_each_mc_addr(ha, dev) { 1999 netdev_for_each_mc_addr(ha, dev) {
1708 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; 2000 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
@@ -1711,9 +2003,15 @@ static void rhine_set_rx_mode(struct net_device *dev)
1711 } 2003 }
1712 iowrite32(mc_filter[0], ioaddr + MulticastFilter0); 2004 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1713 iowrite32(mc_filter[1], ioaddr + MulticastFilter1); 2005 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1714 rx_mode = 0x0C;
1715 } 2006 }
1716 iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig); 2007 /* enable/disable VLAN receive filtering */
2008 if (rp->pdev->revision >= VT6105M) {
2009 if (dev->flags & IFF_PROMISC)
2010 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2011 else
2012 BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2013 }
2014 BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
1717} 2015}
1718 2016
1719static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2017static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1966,7 +2264,7 @@ static int rhine_resume(struct pci_dev *pdev)
1966 if (!netif_running(dev)) 2264 if (!netif_running(dev))
1967 return 0; 2265 return 0;
1968 2266
1969 if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev)) 2267 if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
1970 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name); 2268 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
1971 2269
1972 ret = pci_set_power_state(pdev, PCI_D0); 2270 ret = pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 21314e06e6d..65860a99832 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -44,6 +44,9 @@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
44 44
45static atomic_t devices_found; 45static atomic_t devices_found;
46 46
47#define VMXNET3_MAX_DEVICES 10
48static int enable_mq = 1;
49static int irq_share_mode;
47 50
48/* 51/*
49 * Enable/Disable the given intr 52 * Enable/Disable the given intr
@@ -99,7 +102,7 @@ vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
99static bool 102static bool
100vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 103vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
101{ 104{
102 return netif_queue_stopped(adapter->netdev); 105 return tq->stopped;
103} 106}
104 107
105 108
@@ -107,7 +110,7 @@ static void
107vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 110vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
108{ 111{
109 tq->stopped = false; 112 tq->stopped = false;
110 netif_start_queue(adapter->netdev); 113 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
111} 114}
112 115
113 116
@@ -115,7 +118,7 @@ static void
115vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 118vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
116{ 119{
117 tq->stopped = false; 120 tq->stopped = false;
118 netif_wake_queue(adapter->netdev); 121 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
119} 122}
120 123
121 124
@@ -124,7 +127,7 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
124{ 127{
125 tq->stopped = true; 128 tq->stopped = true;
126 tq->num_stop++; 129 tq->num_stop++;
127 netif_stop_queue(adapter->netdev); 130 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
128} 131}
129 132
130 133
@@ -135,6 +138,7 @@ static void
135vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) 138vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
136{ 139{
137 u32 ret; 140 u32 ret;
141 int i;
138 142
139 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); 143 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
140 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 144 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -145,22 +149,28 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
145 if (!netif_carrier_ok(adapter->netdev)) 149 if (!netif_carrier_ok(adapter->netdev))
146 netif_carrier_on(adapter->netdev); 150 netif_carrier_on(adapter->netdev);
147 151
148 if (affectTxQueue) 152 if (affectTxQueue) {
149 vmxnet3_tq_start(&adapter->tx_queue, adapter); 153 for (i = 0; i < adapter->num_tx_queues; i++)
154 vmxnet3_tq_start(&adapter->tx_queue[i],
155 adapter);
156 }
150 } else { 157 } else {
151 printk(KERN_INFO "%s: NIC Link is Down\n", 158 printk(KERN_INFO "%s: NIC Link is Down\n",
152 adapter->netdev->name); 159 adapter->netdev->name);
153 if (netif_carrier_ok(adapter->netdev)) 160 if (netif_carrier_ok(adapter->netdev))
154 netif_carrier_off(adapter->netdev); 161 netif_carrier_off(adapter->netdev);
155 162
156 if (affectTxQueue) 163 if (affectTxQueue) {
157 vmxnet3_tq_stop(&adapter->tx_queue, adapter); 164 for (i = 0; i < adapter->num_tx_queues; i++)
165 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
166 }
158 } 167 }
159} 168}
160 169
161static void 170static void
162vmxnet3_process_events(struct vmxnet3_adapter *adapter) 171vmxnet3_process_events(struct vmxnet3_adapter *adapter)
163{ 172{
173 int i;
164 u32 events = le32_to_cpu(adapter->shared->ecr); 174 u32 events = le32_to_cpu(adapter->shared->ecr);
165 if (!events) 175 if (!events)
166 return; 176 return;
@@ -176,16 +186,18 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
176 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 186 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
177 VMXNET3_CMD_GET_QUEUE_STATUS); 187 VMXNET3_CMD_GET_QUEUE_STATUS);
178 188
179 if (adapter->tqd_start->status.stopped) { 189 for (i = 0; i < adapter->num_tx_queues; i++)
180 printk(KERN_ERR "%s: tq error 0x%x\n", 190 if (adapter->tqd_start[i].status.stopped)
181 adapter->netdev->name, 191 dev_err(&adapter->netdev->dev,
182 le32_to_cpu(adapter->tqd_start->status.error)); 192 "%s: tq[%d] error 0x%x\n",
183 } 193 adapter->netdev->name, i, le32_to_cpu(
184 if (adapter->rqd_start->status.stopped) { 194 adapter->tqd_start[i].status.error));
185 printk(KERN_ERR "%s: rq error 0x%x\n", 195 for (i = 0; i < adapter->num_rx_queues; i++)
186 adapter->netdev->name, 196 if (adapter->rqd_start[i].status.stopped)
187 adapter->rqd_start->status.error); 197 dev_err(&adapter->netdev->dev,
188 } 198 "%s: rq[%d] error 0x%x\n",
199 adapter->netdev->name, i,
200 adapter->rqd_start[i].status.error);
189 201
190 schedule_work(&adapter->work); 202 schedule_work(&adapter->work);
191 } 203 }
@@ -410,7 +422,7 @@ vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
410} 422}
411 423
412 424
413void 425static void
414vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, 426vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
415 struct vmxnet3_adapter *adapter) 427 struct vmxnet3_adapter *adapter)
416{ 428{
@@ -437,6 +449,17 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
437} 449}
438 450
439 451
452/* Destroy all tx queues */
453void
454vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
455{
456 int i;
457
458 for (i = 0; i < adapter->num_tx_queues; i++)
459 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
460}
461
462
440static void 463static void
441vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, 464vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
442 struct vmxnet3_adapter *adapter) 465 struct vmxnet3_adapter *adapter)
@@ -518,6 +541,14 @@ err:
518 return -ENOMEM; 541 return -ENOMEM;
519} 542}
520 543
544static void
545vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
546{
547 int i;
548
549 for (i = 0; i < adapter->num_tx_queues; i++)
550 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
551}
521 552
522/* 553/*
523 * starting from ring->next2fill, allocate rx buffers for the given ring 554 * starting from ring->next2fill, allocate rx buffers for the given ring
@@ -732,6 +763,17 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
732} 763}
733 764
734 765
766/* Init all tx queues */
767static void
768vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
769{
770 int i;
771
772 for (i = 0; i < adapter->num_tx_queues; i++)
773 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
774}
775
776
735/* 777/*
736 * parse and copy relevant protocol headers: 778 * parse and copy relevant protocol headers:
737 * For a tso pkt, relevant headers are L2/3/4 including options 779 * For a tso pkt, relevant headers are L2/3/4 including options
@@ -903,6 +945,21 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
903 } 945 }
904 } 946 }
905 947
948 spin_lock_irqsave(&tq->tx_lock, flags);
949
950 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
951 tq->stats.tx_ring_full++;
952 dev_dbg(&adapter->netdev->dev,
953 "tx queue stopped on %s, next2comp %u"
954 " next2fill %u\n", adapter->netdev->name,
955 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
956
957 vmxnet3_tq_stop(tq, adapter);
958 spin_unlock_irqrestore(&tq->tx_lock, flags);
959 return NETDEV_TX_BUSY;
960 }
961
962
906 ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter); 963 ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
907 if (ret >= 0) { 964 if (ret >= 0) {
908 BUG_ON(ret <= 0 && ctx.copy_size != 0); 965 BUG_ON(ret <= 0 && ctx.copy_size != 0);
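The hunk above moves the ring-space check ahead of the per-packet header work and stops the queue when the ring cannot hold the new packet. A minimal sketch of that availability math, assuming a hypothetical struct ring rather than the driver's command ring (one slot is kept free so next2fill == next2comp means "empty"):

#include <stdio.h>

struct ring { unsigned size, next2fill, next2comp; };

static unsigned desc_avail(const struct ring *r)
{
	return (r->next2comp > r->next2fill ? 0 : r->size) +
	       r->next2comp - r->next2fill - 1;
}

int main(void)
{
	struct ring r = { .size = 8, .next2fill = 6, .next2comp = 2 };
	unsigned need = 4;

	if (need > desc_avail(&r))
		printf("ring full: stop the tx queue and return BUSY\n");
	else
		printf("%u descriptors free, proceed\n", desc_avail(&r));
	return 0;
}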
@@ -926,20 +983,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
926 goto drop_pkt; 983 goto drop_pkt;
927 } 984 }
928 985
929 spin_lock_irqsave(&tq->tx_lock, flags);
930
931 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
932 tq->stats.tx_ring_full++;
933 dev_dbg(&adapter->netdev->dev,
934 "tx queue stopped on %s, next2comp %u"
935 " next2fill %u\n", adapter->netdev->name,
936 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
937
938 vmxnet3_tq_stop(tq, adapter);
939 spin_unlock_irqrestore(&tq->tx_lock, flags);
940 return NETDEV_TX_BUSY;
941 }
942
943 /* fill tx descs related to addr & len */ 986 /* fill tx descs related to addr & len */
944 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); 987 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
945 988
@@ -1000,7 +1043,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1000 if (le32_to_cpu(tq->shared->txNumDeferred) >= 1043 if (le32_to_cpu(tq->shared->txNumDeferred) >=
1001 le32_to_cpu(tq->shared->txThreshold)) { 1044 le32_to_cpu(tq->shared->txThreshold)) {
1002 tq->shared->txNumDeferred = 0; 1045 tq->shared->txNumDeferred = 0;
1003 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, 1046 VMXNET3_WRITE_BAR0_REG(adapter,
1047 VMXNET3_REG_TXPROD + tq->qid * 8,
1004 tq->tx_ring.next2fill); 1048 tq->tx_ring.next2fill);
1005 } 1049 }
1006 1050
@@ -1020,7 +1064,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1020{ 1064{
1021 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1065 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1022 1066
1023 return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev); 1067 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1068 return vmxnet3_tq_xmit(skb,
1069 &adapter->tx_queue[skb->queue_mapping],
1070 adapter, netdev);
1024} 1071}
1025 1072
1026 1073
@@ -1106,9 +1153,9 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1106 break; 1153 break;
1107 } 1154 }
1108 num_rxd++; 1155 num_rxd++;
1109 1156 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
1110 idx = rcd->rxdIdx; 1157 idx = rcd->rxdIdx;
1111 ring_idx = rcd->rqID == rq->qid ? 0 : 1; 1158 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
1112 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, 1159 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1113 &rxCmdDesc); 1160 &rxCmdDesc);
1114 rbi = rq->buf_info[ring_idx] + idx; 1161 rbi = rq->buf_info[ring_idx] + idx;
@@ -1260,6 +1307,16 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1260} 1307}
1261 1308
1262 1309
1310static void
1311vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1312{
1313 int i;
1314
1315 for (i = 0; i < adapter->num_rx_queues; i++)
1316 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1317}
1318
1319
1263void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, 1320void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1264 struct vmxnet3_adapter *adapter) 1321 struct vmxnet3_adapter *adapter)
1265{ 1322{
@@ -1351,6 +1408,25 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1351 1408
1352 1409
1353static int 1410static int
1411vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1412{
1413 int i, err = 0;
1414
1415 for (i = 0; i < adapter->num_rx_queues; i++) {
1416 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1417 if (unlikely(err)) {
1418 dev_err(&adapter->netdev->dev, "%s: failed to "
1419 "initialize rx queue%i\n",
1420 adapter->netdev->name, i);
1421 break;
1422 }
1423 }
1424 return err;
1425
1426}
1427
1428
1429static int
1354vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) 1430vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1355{ 1431{
1356 int i; 1432 int i;
@@ -1398,33 +1474,177 @@ err:
1398 1474
1399 1475
1400static int 1476static int
1477vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1478{
1479 int i, err = 0;
1480
1481 for (i = 0; i < adapter->num_rx_queues; i++) {
1482 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1483 if (unlikely(err)) {
1484 dev_err(&adapter->netdev->dev,
1485 "%s: failed to create rx queue%i\n",
1486 adapter->netdev->name, i);
1487 goto err_out;
1488 }
1489 }
1490 return err;
1491err_out:
1492 vmxnet3_rq_destroy_all(adapter);
1493 return err;
1494
1495}
1496
1497/* Multiple queue aware polling function for tx and rx */
1498
1499static int
1401vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) 1500vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1402{ 1501{
1502 int rcd_done = 0, i;
1403 if (unlikely(adapter->shared->ecr)) 1503 if (unlikely(adapter->shared->ecr))
1404 vmxnet3_process_events(adapter); 1504 vmxnet3_process_events(adapter);
1505 for (i = 0; i < adapter->num_tx_queues; i++)
1506 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1405 1507
1406 vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter); 1508 for (i = 0; i < adapter->num_rx_queues; i++)
1407 return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget); 1509 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1510 adapter, budget);
1511 return rcd_done;
1408} 1512}
1409 1513
1410 1514
1411static int 1515static int
1412vmxnet3_poll(struct napi_struct *napi, int budget) 1516vmxnet3_poll(struct napi_struct *napi, int budget)
1413{ 1517{
1414 struct vmxnet3_adapter *adapter = container_of(napi, 1518 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1415 struct vmxnet3_adapter, napi); 1519 struct vmxnet3_rx_queue, napi);
1520 int rxd_done;
1521
1522 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1523
1524 if (rxd_done < budget) {
1525 napi_complete(napi);
1526 vmxnet3_enable_all_intrs(rx_queue->adapter);
1527 }
1528 return rxd_done;
1529}
1530
1531/*
1532 * NAPI polling function for MSI-X mode with multiple Rx queues
1533 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
1534 */
1535
1536static int
1537vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1538{
1539 struct vmxnet3_rx_queue *rq = container_of(napi,
1540 struct vmxnet3_rx_queue, napi);
1541 struct vmxnet3_adapter *adapter = rq->adapter;
1416 int rxd_done; 1542 int rxd_done;
1417 1543
1418 rxd_done = vmxnet3_do_poll(adapter, budget); 1544 /* When sharing interrupt with corresponding tx queue, process
1545 * tx completions in that queue as well
1546 */
1547 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1548 struct vmxnet3_tx_queue *tq =
1549 &adapter->tx_queue[rq - adapter->rx_queue];
1550 vmxnet3_tq_tx_complete(tq, adapter);
1551 }
1552
1553 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1419 1554
1420 if (rxd_done < budget) { 1555 if (rxd_done < budget) {
1421 napi_complete(napi); 1556 napi_complete(napi);
1422 vmxnet3_enable_intr(adapter, 0); 1557 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1423 } 1558 }
1424 return rxd_done; 1559 return rxd_done;
1425} 1560}
1426 1561
1427 1562
1563#ifdef CONFIG_PCI_MSI
1564
1565/*
1566 * Handle completion interrupts on tx queues
1567 * Returns whether or not the intr is handled
1568 */
1569
1570static irqreturn_t
1571vmxnet3_msix_tx(int irq, void *data)
1572{
1573 struct vmxnet3_tx_queue *tq = data;
1574 struct vmxnet3_adapter *adapter = tq->adapter;
1575
1576 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1577 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1578
1579 /* Handle the case where only one irq is allocated for all tx queues */
1580 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1581 int i;
1582 for (i = 0; i < adapter->num_tx_queues; i++) {
1583 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1584 vmxnet3_tq_tx_complete(txq, adapter);
1585 }
1586 } else {
1587 vmxnet3_tq_tx_complete(tq, adapter);
1588 }
1589 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1590
1591 return IRQ_HANDLED;
1592}
1593
1594
1595/*
1596 * Handle completion interrupts on rx queues. Returns whether or not the
1597 * intr is handled
1598 */
1599
1600static irqreturn_t
1601vmxnet3_msix_rx(int irq, void *data)
1602{
1603 struct vmxnet3_rx_queue *rq = data;
1604 struct vmxnet3_adapter *adapter = rq->adapter;
1605
1606 /* disable intr if needed */
1607 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1608 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1609 napi_schedule(&rq->napi);
1610
1611 return IRQ_HANDLED;
1612}
1613
1614/*
1615 *----------------------------------------------------------------------------
1616 *
1617 * vmxnet3_msix_event --
1618 *
1619 * vmxnet3 msix event intr handler
1620 *
1621 * Result:
1622 * whether or not the intr is handled
1623 *
1624 *----------------------------------------------------------------------------
1625 */
1626
1627static irqreturn_t
1628vmxnet3_msix_event(int irq, void *data)
1629{
1630 struct net_device *dev = data;
1631 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1632
1633 /* disable intr if needed */
1634 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1635 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1636
1637 if (adapter->shared->ecr)
1638 vmxnet3_process_events(adapter);
1639
1640 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
1641
1642 return IRQ_HANDLED;
1643}
1644
1645#endif /* CONFIG_PCI_MSI */
1646
1647
1428/* Interrupt handler for vmxnet3 */ 1648/* Interrupt handler for vmxnet3 */
1429static irqreturn_t 1649static irqreturn_t
1430vmxnet3_intr(int irq, void *dev_id) 1650vmxnet3_intr(int irq, void *dev_id)
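The multi-queue poll path above completes tx work on every queue and then accumulates rx completions before comparing the total against the NAPI budget. A hedged, self-contained sketch of that accumulation pattern (queue count and pending work are made-up numbers, not driver state):

#include <stdio.h>

#define NUM_RXQ 4

static int pending[NUM_RXQ] = { 10, 3, 0, 7 };	/* pretend per-queue backlog */

static int rq_rx_complete(int q, int budget)
{
	int done = pending[q] < budget ? pending[q] : budget;
	pending[q] -= done;
	return done;
}

int main(void)
{
	int budget = 64, rcd_done = 0;

	for (int q = 0; q < NUM_RXQ; q++)
		rcd_done += rq_rx_complete(q, budget);

	if (rcd_done < budget)
		printf("all queues drained (%d), re-enable interrupts\n", rcd_done);
	else
		printf("budget consumed (%d), poll again\n", rcd_done);
	return 0;
}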
@@ -1432,7 +1652,7 @@ vmxnet3_intr(int irq, void *dev_id)
1432 struct net_device *dev = dev_id; 1652 struct net_device *dev = dev_id;
1433 struct vmxnet3_adapter *adapter = netdev_priv(dev); 1653 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1434 1654
1435 if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) { 1655 if (adapter->intr.type == VMXNET3_IT_INTX) {
1436 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); 1656 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1437 if (unlikely(icr == 0)) 1657 if (unlikely(icr == 0))
1438 /* not ours */ 1658 /* not ours */
@@ -1442,77 +1662,144 @@ vmxnet3_intr(int irq, void *dev_id)
1442 1662
1443 /* disable intr if needed */ 1663 /* disable intr if needed */
1444 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 1664 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1445 vmxnet3_disable_intr(adapter, 0); 1665 vmxnet3_disable_all_intrs(adapter);
1446 1666
1447 napi_schedule(&adapter->napi); 1667 napi_schedule(&adapter->rx_queue[0].napi);
1448 1668
1449 return IRQ_HANDLED; 1669 return IRQ_HANDLED;
1450} 1670}
1451 1671
1452#ifdef CONFIG_NET_POLL_CONTROLLER 1672#ifdef CONFIG_NET_POLL_CONTROLLER
1453 1673
1454
1455/* netpoll callback. */ 1674/* netpoll callback. */
1456static void 1675static void
1457vmxnet3_netpoll(struct net_device *netdev) 1676vmxnet3_netpoll(struct net_device *netdev)
1458{ 1677{
1459 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1678 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1460 int irq;
1461 1679
1462#ifdef CONFIG_PCI_MSI 1680 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1463 if (adapter->intr.type == VMXNET3_IT_MSIX) 1681 vmxnet3_disable_all_intrs(adapter);
1464 irq = adapter->intr.msix_entries[0].vector; 1682
1465 else 1683 vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
1466#endif 1684 vmxnet3_enable_all_intrs(adapter);
1467 irq = adapter->pdev->irq;
1468 1685
1469 disable_irq(irq);
1470 vmxnet3_intr(irq, netdev);
1471 enable_irq(irq);
1472} 1686}
1473#endif 1687#endif /* CONFIG_NET_POLL_CONTROLLER */
1474 1688
1475static int 1689static int
1476vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) 1690vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1477{ 1691{
1478 int err; 1692 struct vmxnet3_intr *intr = &adapter->intr;
1693 int err = 0, i;
1694 int vector = 0;
1479 1695
1480#ifdef CONFIG_PCI_MSI 1696#ifdef CONFIG_PCI_MSI
1481 if (adapter->intr.type == VMXNET3_IT_MSIX) { 1697 if (adapter->intr.type == VMXNET3_IT_MSIX) {
1482 /* we only use 1 MSI-X vector */ 1698 for (i = 0; i < adapter->num_tx_queues; i++) {
1483 err = request_irq(adapter->intr.msix_entries[0].vector, 1699 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1484 vmxnet3_intr, 0, adapter->netdev->name, 1700 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
1485 adapter->netdev); 1701 adapter->netdev->name, vector);
1486 } else if (adapter->intr.type == VMXNET3_IT_MSI) { 1702 err = request_irq(
1703 intr->msix_entries[vector].vector,
1704 vmxnet3_msix_tx, 0,
1705 adapter->tx_queue[i].name,
1706 &adapter->tx_queue[i]);
1707 } else {
1708 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
1709 adapter->netdev->name, vector);
1710 }
1711 if (err) {
1712 dev_err(&adapter->netdev->dev,
1713 "Failed to request irq for MSIX, %s, "
1714 "error %d\n",
1715 adapter->tx_queue[i].name, err);
1716 return err;
1717 }
1718
1719 /* Handle the case where only 1 MSIx was allocated for
1720 * all tx queues */
1721 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1722 for (; i < adapter->num_tx_queues; i++)
1723 adapter->tx_queue[i].comp_ring.intr_idx
1724 = vector;
1725 vector++;
1726 break;
1727 } else {
1728 adapter->tx_queue[i].comp_ring.intr_idx
1729 = vector++;
1730 }
1731 }
1732 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
1733 vector = 0;
1734
1735 for (i = 0; i < adapter->num_rx_queues; i++) {
1736 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
1737 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
1738 adapter->netdev->name, vector);
1739 else
1740 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
1741 adapter->netdev->name, vector);
1742 err = request_irq(intr->msix_entries[vector].vector,
1743 vmxnet3_msix_rx, 0,
1744 adapter->rx_queue[i].name,
1745 &(adapter->rx_queue[i]));
1746 if (err) {
1747 printk(KERN_ERR "Failed to request irq for MSIX"
1748 ", %s, error %d\n",
1749 adapter->rx_queue[i].name, err);
1750 return err;
1751 }
1752
1753 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
1754 }
1755
1756 sprintf(intr->event_msi_vector_name, "%s-event-%d",
1757 adapter->netdev->name, vector);
1758 err = request_irq(intr->msix_entries[vector].vector,
1759 vmxnet3_msix_event, 0,
1760 intr->event_msi_vector_name, adapter->netdev);
1761 intr->event_intr_idx = vector;
1762
1763 } else if (intr->type == VMXNET3_IT_MSI) {
1764 adapter->num_rx_queues = 1;
1487 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, 1765 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1488 adapter->netdev->name, adapter->netdev); 1766 adapter->netdev->name, adapter->netdev);
1489 } else 1767 } else {
1490#endif 1768#endif
1491 { 1769 adapter->num_rx_queues = 1;
1492 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 1770 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1493 IRQF_SHARED, adapter->netdev->name, 1771 IRQF_SHARED, adapter->netdev->name,
1494 adapter->netdev); 1772 adapter->netdev);
1773#ifdef CONFIG_PCI_MSI
1495 } 1774 }
1496 1775#endif
1497 if (err) 1776 intr->num_intrs = vector + 1;
1777 if (err) {
1498 printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" 1778 printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
1499 ":%d\n", adapter->netdev->name, adapter->intr.type, err); 1779 ":%d\n", adapter->netdev->name, intr->type, err);
1780 } else {
1781 /* Number of rx queues will not change after this */
1782 for (i = 0; i < adapter->num_rx_queues; i++) {
1783 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1784 rq->qid = i;
1785 rq->qid2 = i + adapter->num_rx_queues;
1786 }
1500 1787
1501 1788
1502 if (!err) {
1503 int i;
1504 /* init our intr settings */
1505 for (i = 0; i < adapter->intr.num_intrs; i++)
1506 adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
1507 1789
1508 /* next setup intr index for all intr sources */ 1790 /* init our intr settings */
1509 adapter->tx_queue.comp_ring.intr_idx = 0; 1791 for (i = 0; i < intr->num_intrs; i++)
1510 adapter->rx_queue.comp_ring.intr_idx = 0; 1792 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
1511 adapter->intr.event_intr_idx = 0; 1793 if (adapter->intr.type != VMXNET3_IT_MSIX) {
1794 adapter->intr.event_intr_idx = 0;
1795 for (i = 0; i < adapter->num_tx_queues; i++)
1796 adapter->tx_queue[i].comp_ring.intr_idx = 0;
1797 adapter->rx_queue[0].comp_ring.intr_idx = 0;
1798 }
1512 1799
1513 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " 1800 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
1514 "allocated\n", adapter->netdev->name, adapter->intr.type, 1801 "allocated\n", adapter->netdev->name, intr->type,
1515 adapter->intr.mask_mode, adapter->intr.num_intrs); 1802 intr->mask_mode, intr->num_intrs);
1516 } 1803 }
1517 1804
1518 return err; 1805 return err;
@@ -1522,18 +1809,32 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1522static void 1809static void
1523vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) 1810vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1524{ 1811{
1525 BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO || 1812 struct vmxnet3_intr *intr = &adapter->intr;
1526 adapter->intr.num_intrs <= 0); 1813 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
1527 1814
1528 switch (adapter->intr.type) { 1815 switch (intr->type) {
1529#ifdef CONFIG_PCI_MSI 1816#ifdef CONFIG_PCI_MSI
1530 case VMXNET3_IT_MSIX: 1817 case VMXNET3_IT_MSIX:
1531 { 1818 {
1532 int i; 1819 int i, vector = 0;
1533 1820
1534 for (i = 0; i < adapter->intr.num_intrs; i++) 1821 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1535 free_irq(adapter->intr.msix_entries[i].vector, 1822 for (i = 0; i < adapter->num_tx_queues; i++) {
1536 adapter->netdev); 1823 free_irq(intr->msix_entries[vector++].vector,
1824 &(adapter->tx_queue[i]));
1825 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
1826 break;
1827 }
1828 }
1829
1830 for (i = 0; i < adapter->num_rx_queues; i++) {
1831 free_irq(intr->msix_entries[vector++].vector,
1832 &(adapter->rx_queue[i]));
1833 }
1834
1835 free_irq(intr->msix_entries[vector].vector,
1836 adapter->netdev);
1837 BUG_ON(vector >= intr->num_intrs);
1537 break; 1838 break;
1538 } 1839 }
1539#endif 1840#endif
@@ -1727,6 +2028,15 @@ vmxnet3_set_mc(struct net_device *netdev)
1727 kfree(new_table); 2028 kfree(new_table);
1728} 2029}
1729 2030
2031void
2032vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2033{
2034 int i;
2035
2036 for (i = 0; i < adapter->num_rx_queues; i++)
2037 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2038}
2039
1730 2040
1731/* 2041/*
1732 * Set up driver_shared based on settings in adapter. 2042 * Set up driver_shared based on settings in adapter.
@@ -1774,40 +2084,72 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1774 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); 2084 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
1775 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); 2085 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
1776 devRead->misc.queueDescLen = cpu_to_le32( 2086 devRead->misc.queueDescLen = cpu_to_le32(
1777 sizeof(struct Vmxnet3_TxQueueDesc) + 2087 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
1778 sizeof(struct Vmxnet3_RxQueueDesc)); 2088 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
1779 2089
1780 /* tx queue settings */ 2090 /* tx queue settings */
1781 BUG_ON(adapter->tx_queue.tx_ring.base == NULL); 2091 devRead->misc.numTxQueues = adapter->num_tx_queues;
1782 2092 for (i = 0; i < adapter->num_tx_queues; i++) {
1783 devRead->misc.numTxQueues = 1; 2093 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
1784 tqc = &adapter->tqd_start->conf; 2094 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
1785 tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA); 2095 tqc = &adapter->tqd_start[i].conf;
1786 tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA); 2096 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
1787 tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA); 2097 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
1788 tqc->ddPA = cpu_to_le64(virt_to_phys( 2098 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
1789 adapter->tx_queue.buf_info)); 2099 tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
1790 tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size); 2100 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
1791 tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size); 2101 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
1792 tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size); 2102 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
1793 tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) * 2103 tqc->ddLen = cpu_to_le32(
1794 tqc->txRingSize); 2104 sizeof(struct vmxnet3_tx_buf_info) *
1795 tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; 2105 tqc->txRingSize);
2106 tqc->intrIdx = tq->comp_ring.intr_idx;
2107 }
1796 2108
1797 /* rx queue settings */ 2109 /* rx queue settings */
1798 devRead->misc.numRxQueues = 1; 2110 devRead->misc.numRxQueues = adapter->num_rx_queues;
1799 rqc = &adapter->rqd_start->conf; 2111 for (i = 0; i < adapter->num_rx_queues; i++) {
1800 rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA); 2112 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1801 rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA); 2113 rqc = &adapter->rqd_start[i].conf;
1802 rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA); 2114 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
1803 rqc->ddPA = cpu_to_le64(virt_to_phys( 2115 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
1804 adapter->rx_queue.buf_info)); 2116 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
1805 rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size); 2117 rqc->ddPA = cpu_to_le64(virt_to_phys(
1806 rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size); 2118 rq->buf_info));
1807 rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size); 2119 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
1808 rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) * 2120 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
1809 (rqc->rxRingSize[0] + rqc->rxRingSize[1])); 2121 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
1810 rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; 2122 rqc->ddLen = cpu_to_le32(
2123 sizeof(struct vmxnet3_rx_buf_info) *
2124 (rqc->rxRingSize[0] +
2125 rqc->rxRingSize[1]));
2126 rqc->intrIdx = rq->comp_ring.intr_idx;
2127 }
2128
2129#ifdef VMXNET3_RSS
2130 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2131
2132 if (adapter->rss) {
2133 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2134 devRead->misc.uptFeatures |= UPT1_F_RSS;
2135 devRead->misc.numRxQueues = adapter->num_rx_queues;
2136 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2137 UPT1_RSS_HASH_TYPE_IPV4 |
2138 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2139 UPT1_RSS_HASH_TYPE_IPV6;
2140 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2141 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2142 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2143 get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
2144 for (i = 0; i < rssConf->indTableSize; i++)
2145 rssConf->indTable[i] = i % adapter->num_rx_queues;
2146
2147 devRead->rssConfDesc.confVer = 1;
2148 devRead->rssConfDesc.confLen = sizeof(*rssConf);
2149 devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
2150 }
2151
2152#endif /* VMXNET3_RSS */
1811 2153
1812 /* intr settings */ 2154 /* intr settings */
1813 devRead->intrConf.autoMask = adapter->intr.mask_mode == 2155 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
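The RSS block in the hunk above programs a random hash key, fills the indirection table round-robin across the rx queues, and lets the device map a flow hash to a queue. A standalone sketch of that table fill and lookup, using a placeholder xorshift "hash" instead of Toeplitz and invented sizes rather than the UPT1 constants:

#include <stdint.h>
#include <stdio.h>

#define IND_TABLE_SIZE	32
#define NUM_RX_QUEUES	4

static uint8_t ind_table[IND_TABLE_SIZE];

static uint32_t toy_hash(uint32_t x)	/* placeholder, not Toeplitz */
{
	x ^= x << 13; x ^= x >> 17; x ^= x << 5;
	return x;
}

int main(void)
{
	/* spread queues evenly across the indirection table */
	for (int i = 0; i < IND_TABLE_SIZE; i++)
		ind_table[i] = i % NUM_RX_QUEUES;

	/* a flow hash indexes the table modulo its size */
	uint32_t hash = toy_hash(0x12345678);
	printf("flow -> rx queue %u\n", (unsigned)ind_table[hash % IND_TABLE_SIZE]);
	return 0;
}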
@@ -1829,18 +2171,18 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1829int 2171int
1830vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) 2172vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1831{ 2173{
1832 int err; 2174 int err, i;
1833 u32 ret; 2175 u32 ret;
1834 2176
1835 dev_dbg(&adapter->netdev->dev, 2177 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
1836 "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes" 2178 " ring sizes %u %u %u\n", adapter->netdev->name,
1837 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, 2179 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
1838 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size, 2180 adapter->tx_queue[0].tx_ring.size,
1839 adapter->rx_queue.rx_ring[0].size, 2181 adapter->rx_queue[0].rx_ring[0].size,
1840 adapter->rx_queue.rx_ring[1].size); 2182 adapter->rx_queue[0].rx_ring[1].size);
1841 2183
1842 vmxnet3_tq_init(&adapter->tx_queue, adapter); 2184 vmxnet3_tq_init_all(adapter);
1843 err = vmxnet3_rq_init(&adapter->rx_queue, adapter); 2185 err = vmxnet3_rq_init_all(adapter);
1844 if (err) { 2186 if (err) {
1845 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", 2187 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
1846 adapter->netdev->name, err); 2188 adapter->netdev->name, err);
@@ -1870,10 +2212,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1870 err = -EINVAL; 2212 err = -EINVAL;
1871 goto activate_err; 2213 goto activate_err;
1872 } 2214 }
1873 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD, 2215
1874 adapter->rx_queue.rx_ring[0].next2fill); 2216 for (i = 0; i < adapter->num_rx_queues; i++) {
1875 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2, 2217 VMXNET3_WRITE_BAR0_REG(adapter,
1876 adapter->rx_queue.rx_ring[1].next2fill); 2218 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2219 adapter->rx_queue[i].rx_ring[0].next2fill);
2220 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2221 (i * VMXNET3_REG_ALIGN)),
2222 adapter->rx_queue[i].rx_ring[1].next2fill);
2223 }
1877 2224
1878 /* Apply the rx filter settings last. */ 2225 /* Apply the rx filter settings last. */
1879 vmxnet3_set_mc(adapter->netdev); 2226 vmxnet3_set_mc(adapter->netdev);
@@ -1883,8 +2230,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1883 * tx queue if the link is up. 2230 * tx queue if the link is up.
1884 */ 2231 */
1885 vmxnet3_check_link(adapter, true); 2232 vmxnet3_check_link(adapter, true);
1886 2233 for (i = 0; i < adapter->num_rx_queues; i++)
1887 napi_enable(&adapter->napi); 2234 napi_enable(&adapter->rx_queue[i].napi);
1888 vmxnet3_enable_all_intrs(adapter); 2235 vmxnet3_enable_all_intrs(adapter);
1889 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 2236 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
1890 return 0; 2237 return 0;
@@ -1896,7 +2243,7 @@ activate_err:
1896irq_err: 2243irq_err:
1897rq_err: 2244rq_err:
1898 /* free up buffers we allocated */ 2245 /* free up buffers we allocated */
1899 vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); 2246 vmxnet3_rq_cleanup_all(adapter);
1900 return err; 2247 return err;
1901} 2248}
1902 2249
@@ -1911,6 +2258,7 @@ vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
1911int 2258int
1912vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) 2259vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
1913{ 2260{
2261 int i;
1914 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) 2262 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
1915 return 0; 2263 return 0;
1916 2264
@@ -1919,13 +2267,14 @@ vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
1919 VMXNET3_CMD_QUIESCE_DEV); 2267 VMXNET3_CMD_QUIESCE_DEV);
1920 vmxnet3_disable_all_intrs(adapter); 2268 vmxnet3_disable_all_intrs(adapter);
1921 2269
1922 napi_disable(&adapter->napi); 2270 for (i = 0; i < adapter->num_rx_queues; i++)
2271 napi_disable(&adapter->rx_queue[i].napi);
1923 netif_tx_disable(adapter->netdev); 2272 netif_tx_disable(adapter->netdev);
1924 adapter->link_speed = 0; 2273 adapter->link_speed = 0;
1925 netif_carrier_off(adapter->netdev); 2274 netif_carrier_off(adapter->netdev);
1926 2275
1927 vmxnet3_tq_cleanup(&adapter->tx_queue, adapter); 2276 vmxnet3_tq_cleanup_all(adapter);
1928 vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); 2277 vmxnet3_rq_cleanup_all(adapter);
1929 vmxnet3_free_irqs(adapter); 2278 vmxnet3_free_irqs(adapter);
1930 return 0; 2279 return 0;
1931} 2280}
@@ -2047,7 +2396,9 @@ vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2047static void 2396static void
2048vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) 2397vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2049{ 2398{
2050 size_t sz; 2399 size_t sz, i, ring0_size, ring1_size, comp_size;
2400 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2401
2051 2402
2052 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - 2403 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2053 VMXNET3_MAX_ETH_HDR_SIZE) { 2404 VMXNET3_MAX_ETH_HDR_SIZE) {
@@ -2069,11 +2420,19 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2069 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN 2420 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2070 */ 2421 */
2071 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; 2422 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2072 adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size + 2423 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2073 sz - 1) / sz * sz; 2424 ring0_size = (ring0_size + sz - 1) / sz * sz;
2074 adapter->rx_queue.rx_ring[0].size = min_t(u32, 2425 ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE /
2075 adapter->rx_queue.rx_ring[0].size, 2426 sz * sz);
2076 VMXNET3_RX_RING_MAX_SIZE / sz * sz); 2427 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2428 comp_size = ring0_size + ring1_size;
2429
2430 for (i = 0; i < adapter->num_rx_queues; i++) {
2431 rq = &adapter->rx_queue[i];
2432 rq->rx_ring[0].size = ring0_size;
2433 rq->rx_ring[1].size = ring1_size;
2434 rq->comp_ring.size = comp_size;
2435 }
2077} 2436}
2078 2437
2079 2438
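The ring-sizing hunk above rounds the requested ring-0 size up to a multiple of rx_buf_per_pkt * RING_SIZE_ALIGN, clamps it to the largest aligned value below the hardware maximum, and sizes the completion ring as ring0 + ring1. A hedged sketch of the same arithmetic with example constants:

#include <stdio.h>

#define RING_SIZE_ALIGN	32U
#define RX_RING_MAX	4096U

int main(void)
{
	unsigned rx_buf_per_pkt = 3;	/* e.g. a large MTU split over buffers */
	unsigned sz = rx_buf_per_pkt * RING_SIZE_ALIGN;
	unsigned ring0 = 1000, ring1 = 256;

	ring0 = (ring0 + sz - 1) / sz * sz;	/* round up to a multiple of sz */
	if (ring0 > RX_RING_MAX / sz * sz)	/* clamp to the aligned maximum */
		ring0 = RX_RING_MAX / sz * sz;

	printf("ring0 %u, ring1 %u, comp %u\n", ring0, ring1, ring0 + ring1);
	return 0;
}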
@@ -2081,29 +2440,53 @@ int
2081vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, 2440vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2082 u32 rx_ring_size, u32 rx_ring2_size) 2441 u32 rx_ring_size, u32 rx_ring2_size)
2083{ 2442{
2084 int err; 2443 int err = 0, i;
2085 2444
2086 adapter->tx_queue.tx_ring.size = tx_ring_size; 2445 for (i = 0; i < adapter->num_tx_queues; i++) {
2087 adapter->tx_queue.data_ring.size = tx_ring_size; 2446 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2088 adapter->tx_queue.comp_ring.size = tx_ring_size; 2447 tq->tx_ring.size = tx_ring_size;
2089 adapter->tx_queue.shared = &adapter->tqd_start->ctrl; 2448 tq->data_ring.size = tx_ring_size;
2090 adapter->tx_queue.stopped = true; 2449 tq->comp_ring.size = tx_ring_size;
2091 err = vmxnet3_tq_create(&adapter->tx_queue, adapter); 2450 tq->shared = &adapter->tqd_start[i].ctrl;
2092 if (err) 2451 tq->stopped = true;
2093 return err; 2452 tq->adapter = adapter;
2453 tq->qid = i;
2454 err = vmxnet3_tq_create(tq, adapter);
2455 /*
2456 * Too late to change num_tx_queues. We cannot make do with
2457 * fewer queues than what we asked for
2458 */
2459 if (err)
2460 goto queue_err;
2461 }
2094 2462
2095 adapter->rx_queue.rx_ring[0].size = rx_ring_size; 2463 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2096 adapter->rx_queue.rx_ring[1].size = rx_ring2_size; 2464 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2097 vmxnet3_adjust_rx_ring_size(adapter); 2465 vmxnet3_adjust_rx_ring_size(adapter);
2098 adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size + 2466 for (i = 0; i < adapter->num_rx_queues; i++) {
2099 adapter->rx_queue.rx_ring[1].size; 2467 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2100 adapter->rx_queue.qid = 0; 2468 /* qid and qid2 for rx queues will be assigned later when num
2101 adapter->rx_queue.qid2 = 1; 2469 * of rx queues is finalized after allocating intrs */
2102 adapter->rx_queue.shared = &adapter->rqd_start->ctrl; 2470 rq->shared = &adapter->rqd_start[i].ctrl;
2103 err = vmxnet3_rq_create(&adapter->rx_queue, adapter); 2471 rq->adapter = adapter;
2104 if (err) 2472 err = vmxnet3_rq_create(rq, adapter);
2105 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 2473 if (err) {
2106 2474 if (i == 0) {
2475 printk(KERN_ERR "Could not allocate any rx "
2476 "queues. Aborting.\n");
2477 goto queue_err;
2478 } else {
2479 printk(KERN_INFO "Number of rx queues changed "
2480 "to : %d.\n", i);
2481 adapter->num_rx_queues = i;
2482 err = 0;
2483 break;
2484 }
2485 }
2486 }
2487 return err;
2488queue_err:
2489 vmxnet3_tq_destroy_all(adapter);
2107 return err; 2490 return err;
2108} 2491}
2109 2492
@@ -2111,11 +2494,12 @@ static int
2111vmxnet3_open(struct net_device *netdev) 2494vmxnet3_open(struct net_device *netdev)
2112{ 2495{
2113 struct vmxnet3_adapter *adapter; 2496 struct vmxnet3_adapter *adapter;
2114 int err; 2497 int err, i;
2115 2498
2116 adapter = netdev_priv(netdev); 2499 adapter = netdev_priv(netdev);
2117 2500
2118 spin_lock_init(&adapter->tx_queue.tx_lock); 2501 for (i = 0; i < adapter->num_tx_queues; i++)
2502 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2119 2503
2120 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, 2504 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
2121 VMXNET3_DEF_RX_RING_SIZE, 2505 VMXNET3_DEF_RX_RING_SIZE,
@@ -2130,8 +2514,8 @@ vmxnet3_open(struct net_device *netdev)
2130 return 0; 2514 return 0;
2131 2515
2132activate_err: 2516activate_err:
2133 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 2517 vmxnet3_rq_destroy_all(adapter);
2134 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 2518 vmxnet3_tq_destroy_all(adapter);
2135queue_err: 2519queue_err:
2136 return err; 2520 return err;
2137} 2521}
@@ -2151,8 +2535,8 @@ vmxnet3_close(struct net_device *netdev)
2151 2535
2152 vmxnet3_quiesce_dev(adapter); 2536 vmxnet3_quiesce_dev(adapter);
2153 2537
2154 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 2538 vmxnet3_rq_destroy_all(adapter);
2155 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 2539 vmxnet3_tq_destroy_all(adapter);
2156 2540
2157 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2541 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2158 2542
@@ -2164,6 +2548,8 @@ vmxnet3_close(struct net_device *netdev)
2164void 2548void
2165vmxnet3_force_close(struct vmxnet3_adapter *adapter) 2549vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2166{ 2550{
2551 int i;
2552
2167 /* 2553 /*
2168 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise 2554 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2169 * vmxnet3_close() will deadlock. 2555 * vmxnet3_close() will deadlock.
@@ -2171,7 +2557,8 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2171 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); 2557 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2172 2558
2173 /* we need to enable NAPI, otherwise dev_close will deadlock */ 2559 /* we need to enable NAPI, otherwise dev_close will deadlock */
2174 napi_enable(&adapter->napi); 2560 for (i = 0; i < adapter->num_rx_queues; i++)
2561 napi_enable(&adapter->rx_queue[i].napi);
2175 dev_close(adapter->netdev); 2562 dev_close(adapter->netdev);
2176} 2563}
2177 2564
@@ -2202,14 +2589,11 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2202 vmxnet3_reset_dev(adapter); 2589 vmxnet3_reset_dev(adapter);
2203 2590
2204 /* we need to re-create the rx queue based on the new mtu */ 2591 /* we need to re-create the rx queue based on the new mtu */
2205 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 2592 vmxnet3_rq_destroy_all(adapter);
2206 vmxnet3_adjust_rx_ring_size(adapter); 2593 vmxnet3_adjust_rx_ring_size(adapter);
2207 adapter->rx_queue.comp_ring.size = 2594 err = vmxnet3_rq_create_all(adapter);
2208 adapter->rx_queue.rx_ring[0].size +
2209 adapter->rx_queue.rx_ring[1].size;
2210 err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
2211 if (err) { 2595 if (err) {
2212 printk(KERN_ERR "%s: failed to re-create rx queue," 2596 printk(KERN_ERR "%s: failed to re-create rx queues,"
2213 " error %d. Closing it.\n", netdev->name, err); 2597 " error %d. Closing it.\n", netdev->name, err);
2214 goto out; 2598 goto out;
2215 } 2599 }
@@ -2274,6 +2658,55 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2274 mac[5] = (tmp >> 8) & 0xff; 2658 mac[5] = (tmp >> 8) & 0xff;
2275} 2659}
2276 2660
2661#ifdef CONFIG_PCI_MSI
2662
2663/*
2664 * Enable MSIx vectors.
2665 * Returns :
2666 * 0 on successful enabling of required vectors,
2667 * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of vectors required
2668 * could be enabled.
2669 * number of vectors which can be enabled otherwise (this number is smaller
2670 * than VMXNET3_LINUX_MIN_MSIX_VECT)
2671 */
2672
2673static int
2674vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
2675 int vectors)
2676{
2677 int err = 0, vector_threshold;
2678 vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
2679
2680 while (vectors >= vector_threshold) {
2681 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2682 vectors);
2683 if (!err) {
2684 adapter->intr.num_intrs = vectors;
2685 return 0;
2686 } else if (err < 0) {
2687 printk(KERN_ERR "Failed to enable MSI-X for %s, error"
2688 " %d\n", adapter->netdev->name, err);
2689 vectors = 0;
2690 } else if (err < vector_threshold) {
2691 break;
2692 } else {
2693 /* If fails to enable required number of MSI-x vectors
2694 * try enabling 3 of them. One each for rx, tx and event
2695 */
2696 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
2697 " %d instead\n", vectors, adapter->netdev->name,
2698 vector_threshold);
2699 vectors = vector_threshold;
2700 }
2701 }
2702
2703 printk(KERN_INFO "Number of MSI-X interrupts which can be allocated"
2704 " is lower than min threshold required.\n");
2705 return err;
2706}
2707
2708
2709#endif /* CONFIG_PCI_MSI */
2277 2710
2278static void 2711static void
2279vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) 2712vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
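The acquire-vectors loop above retries with the driver minimum when the platform offers fewer MSI-X vectors than requested. A hedged userspace sketch of that retry pattern, with a hypothetical try_enable_msix() standing in for pci_enable_msix() (0 = success, negative = hard failure, positive = how many vectors are actually available):

#include <stdio.h>

#define MIN_MSIX_VECT	3	/* rx + tx + event, as a minimum */

static int try_enable_msix(int requested)	/* hypothetical stand-in */
{
	return requested > 5 ? 5 : 0;	/* pretend the platform caps us at 5 */
}

int main(void)
{
	int vectors = 9;

	while (vectors >= MIN_MSIX_VECT) {
		int err = try_enable_msix(vectors);

		if (err == 0) {
			printf("enabled %d MSI-X vectors\n", vectors);
			return 0;
		} else if (err < 0) {
			break;		/* hard failure, give up */
		} else if (err < MIN_MSIX_VECT) {
			break;		/* fewer than we can live with */
		} else {
			printf("only %d available, retrying with the minimum\n", err);
			vectors = MIN_MSIX_VECT;
		}
	}
	printf("falling back to MSI/INTx\n");
	return 0;
}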
@@ -2293,16 +2726,47 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2293 2726
2294#ifdef CONFIG_PCI_MSI 2727#ifdef CONFIG_PCI_MSI
2295 if (adapter->intr.type == VMXNET3_IT_MSIX) { 2728 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2296 int err; 2729 int vector, err = 0;
2297 2730
2298 adapter->intr.msix_entries[0].entry = 0; 2731 adapter->intr.num_intrs = (adapter->share_intr ==
2299 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, 2732 VMXNET3_INTR_TXSHARE) ? 1 :
2300 VMXNET3_LINUX_MAX_MSIX_VECT); 2733 adapter->num_tx_queues;
2301 if (!err) { 2734 adapter->intr.num_intrs += (adapter->share_intr ==
2302 adapter->intr.num_intrs = 1; 2735 VMXNET3_INTR_BUDDYSHARE) ? 0 :
2303 adapter->intr.type = VMXNET3_IT_MSIX; 2736 adapter->num_rx_queues;
2737 adapter->intr.num_intrs += 1; /* for link event */
2738
2739 adapter->intr.num_intrs = (adapter->intr.num_intrs >
2740 VMXNET3_LINUX_MIN_MSIX_VECT
2741 ? adapter->intr.num_intrs :
2742 VMXNET3_LINUX_MIN_MSIX_VECT);
2743
2744 for (vector = 0; vector < adapter->intr.num_intrs; vector++)
2745 adapter->intr.msix_entries[vector].entry = vector;
2746
2747 err = vmxnet3_acquire_msix_vectors(adapter,
2748 adapter->intr.num_intrs);
2749 /* If we cannot allocate one MSIx vector per queue
2750 * then limit the number of rx queues to 1
2751 */
2752 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
2753 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
2754 || adapter->num_rx_queues != 2) {
2755 adapter->share_intr = VMXNET3_INTR_TXSHARE;
2756 printk(KERN_ERR "Number of rx queues : 1\n");
2757 adapter->num_rx_queues = 1;
2758 adapter->intr.num_intrs =
2759 VMXNET3_LINUX_MIN_MSIX_VECT;
2760 }
2304 return; 2761 return;
2305 } 2762 }
2763 if (!err)
2764 return;
2765
2766 /* If we cannot allocate MSIx vectors use only one rx queue */
2767 printk(KERN_INFO "Failed to enable MSI-X for %s, error %d."
2768 "#rx queues : 1, try MSI\n", adapter->netdev->name, err);
2769
2306 adapter->intr.type = VMXNET3_IT_MSI; 2770 adapter->intr.type = VMXNET3_IT_MSI;
2307 } 2771 }
2308 2772
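The hunk above budgets one vector per tx queue (or one shared tx vector), one per rx queue (unless rx buddies with tx), plus one for link events, and never asks for less than the driver minimum. A small sketch of that computation; the enum values are illustrative, not the driver's definitions:

#include <stdio.h>

enum share_mode { DONTSHARE, TXSHARE, BUDDYSHARE };

#define MIN_MSIX_VECT 3

static int vectors_needed(enum share_mode mode, int num_tx, int num_rx)
{
	int n = (mode == TXSHARE) ? 1 : num_tx;	  /* one vector for all tx?  */
	n += (mode == BUDDYSHARE) ? 0 : num_rx;	  /* rx shares tx's vectors? */
	n += 1;					  /* link/event vector       */
	return n < MIN_MSIX_VECT ? MIN_MSIX_VECT : n;
}

int main(void)
{
	printf("dontshare 4/4  -> %d vectors\n", vectors_needed(DONTSHARE, 4, 4));
	printf("buddyshare 4/4 -> %d vectors\n", vectors_needed(BUDDYSHARE, 4, 4));
	printf("txshare 4/1    -> %d vectors\n", vectors_needed(TXSHARE, 4, 1));
	return 0;
}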
@@ -2310,12 +2774,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2310 int err; 2774 int err;
2311 err = pci_enable_msi(adapter->pdev); 2775 err = pci_enable_msi(adapter->pdev);
2312 if (!err) { 2776 if (!err) {
2777 adapter->num_rx_queues = 1;
2313 adapter->intr.num_intrs = 1; 2778 adapter->intr.num_intrs = 1;
2314 return; 2779 return;
2315 } 2780 }
2316 } 2781 }
2317#endif /* CONFIG_PCI_MSI */ 2782#endif /* CONFIG_PCI_MSI */
2318 2783
2784 adapter->num_rx_queues = 1;
2785 printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
2319 adapter->intr.type = VMXNET3_IT_INTX; 2786 adapter->intr.type = VMXNET3_IT_INTX;
2320 2787
2321 /* INT-X related setting */ 2788 /* INT-X related setting */
@@ -2343,6 +2810,7 @@ vmxnet3_tx_timeout(struct net_device *netdev)
2343 2810
2344 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); 2811 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
2345 schedule_work(&adapter->work); 2812 schedule_work(&adapter->work);
2813 netif_wake_queue(adapter->netdev);
2346} 2814}
2347 2815
2348 2816
@@ -2399,8 +2867,29 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2399 struct net_device *netdev; 2867 struct net_device *netdev;
2400 struct vmxnet3_adapter *adapter; 2868 struct vmxnet3_adapter *adapter;
2401 u8 mac[ETH_ALEN]; 2869 u8 mac[ETH_ALEN];
2870 int size;
2871 int num_tx_queues;
2872 int num_rx_queues;
2873
2874#ifdef VMXNET3_RSS
2875 if (enable_mq)
2876 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
2877 (int)num_online_cpus());
2878 else
2879#endif
2880 num_rx_queues = 1;
2881
2882 if (enable_mq)
2883 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
2884 (int)num_online_cpus());
2885 else
2886 num_tx_queues = 1;
2887
2888 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
2889 max(num_tx_queues, num_rx_queues));
2890 printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
2891 num_tx_queues, num_rx_queues);
2402 2892
2403 netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
2404 if (!netdev) { 2893 if (!netdev) {
2405 printk(KERN_ERR "Failed to alloc ethernet device for adapter " 2894 printk(KERN_ERR "Failed to alloc ethernet device for adapter "
2406 "%s\n", pci_name(pdev)); 2895 "%s\n", pci_name(pdev));
@@ -2422,9 +2911,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2422 goto err_alloc_shared; 2911 goto err_alloc_shared;
2423 } 2912 }
2424 2913
2425 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, 2914 adapter->num_rx_queues = num_rx_queues;
2426 sizeof(struct Vmxnet3_TxQueueDesc) + 2915 adapter->num_tx_queues = num_tx_queues;
2427 sizeof(struct Vmxnet3_RxQueueDesc), 2916
2917 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2918 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
2919 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
2428 &adapter->queue_desc_pa); 2920 &adapter->queue_desc_pa);
2429 2921
2430 if (!adapter->tqd_start) { 2922 if (!adapter->tqd_start) {
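The probe hunk above sizes one contiguous allocation for all tx queue descriptors followed by all rx queue descriptors, and later recovers the rx start with pointer arithmetic on the tx pointer. A hedged sketch of that layout with plain malloc standing in for pci_alloc_consistent() and invented descriptor sizes:

#include <stdio.h>
#include <stdlib.h>

struct tx_desc { char pad[64]; };
struct rx_desc { char pad[96]; };

int main(void)
{
	int num_tx = 4, num_rx = 4;
	size_t size = sizeof(struct tx_desc) * num_tx +
		      sizeof(struct rx_desc) * num_rx;

	struct tx_desc *tqd = malloc(size);
	if (!tqd)
		return 1;

	/* rx descriptors begin right after the last tx descriptor */
	struct rx_desc *rqd = (struct rx_desc *)(tqd + num_tx);

	printf("total %zu bytes, rx area starts at byte offset %td\n",
	       size, (char *)rqd - (char *)tqd);
	free(tqd);
	return 0;
}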
@@ -2433,8 +2925,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2433 err = -ENOMEM; 2925 err = -ENOMEM;
2434 goto err_alloc_queue_desc; 2926 goto err_alloc_queue_desc;
2435 } 2927 }
2436 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start 2928 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
2437 + 1); 2929 adapter->num_tx_queues);
2438 2930
2439 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); 2931 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2440 if (adapter->pm_conf == NULL) { 2932 if (adapter->pm_conf == NULL) {
@@ -2444,6 +2936,17 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2444 goto err_alloc_pm; 2936 goto err_alloc_pm;
2445 } 2937 }
2446 2938
2939#ifdef VMXNET3_RSS
2940
2941 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
2942 if (adapter->rss_conf == NULL) {
2943 printk(KERN_ERR "Failed to allocate memory for %s\n",
2944 pci_name(pdev));
2945 err = -ENOMEM;
2946 goto err_alloc_rss;
2947 }
2948#endif /* VMXNET3_RSS */
2949
2447 err = vmxnet3_alloc_pci_resources(adapter, &dma64); 2950 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
2448 if (err < 0) 2951 if (err < 0)
2449 goto err_alloc_pci; 2952 goto err_alloc_pci;
@@ -2471,18 +2974,48 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2471 vmxnet3_declare_features(adapter, dma64); 2974 vmxnet3_declare_features(adapter, dma64);
2472 2975
2473 adapter->dev_number = atomic_read(&devices_found); 2976 adapter->dev_number = atomic_read(&devices_found);
2977
2978 adapter->share_intr = irq_share_mode;
2979 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
2980 adapter->num_tx_queues != adapter->num_rx_queues)
2981 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
2982
2474 vmxnet3_alloc_intr_resources(adapter); 2983 vmxnet3_alloc_intr_resources(adapter);
2475 2984
2985#ifdef VMXNET3_RSS
2986 if (adapter->num_rx_queues > 1 &&
2987 adapter->intr.type == VMXNET3_IT_MSIX) {
2988 adapter->rss = true;
2989 printk(KERN_INFO "RSS is enabled.\n");
2990 } else {
2991 adapter->rss = false;
2992 }
2993#endif
2994
2476 vmxnet3_read_mac_addr(adapter, mac); 2995 vmxnet3_read_mac_addr(adapter, mac);
2477 memcpy(netdev->dev_addr, mac, netdev->addr_len); 2996 memcpy(netdev->dev_addr, mac, netdev->addr_len);
2478 2997
2479 netdev->netdev_ops = &vmxnet3_netdev_ops; 2998 netdev->netdev_ops = &vmxnet3_netdev_ops;
2480 netdev->watchdog_timeo = 5 * HZ;
2481 vmxnet3_set_ethtool_ops(netdev); 2999 vmxnet3_set_ethtool_ops(netdev);
3000 netdev->watchdog_timeo = 5 * HZ;
2482 3001
2483 INIT_WORK(&adapter->work, vmxnet3_reset_work); 3002 INIT_WORK(&adapter->work, vmxnet3_reset_work);
2484 3003
2485 netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64); 3004 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3005 int i;
3006 for (i = 0; i < adapter->num_rx_queues; i++) {
3007 netif_napi_add(adapter->netdev,
3008 &adapter->rx_queue[i].napi,
3009 vmxnet3_poll_rx_only, 64);
3010 }
3011 } else {
3012 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3013 vmxnet3_poll, 64);
3014 }
3015
3016 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3017 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3018
2486 SET_NETDEV_DEV(netdev, &pdev->dev); 3019 SET_NETDEV_DEV(netdev, &pdev->dev);
2487 err = register_netdev(netdev); 3020 err = register_netdev(netdev);
2488 3021
@@ -2502,11 +3035,14 @@ err_register:
2502err_ver: 3035err_ver:
2503 vmxnet3_free_pci_resources(adapter); 3036 vmxnet3_free_pci_resources(adapter);
2504err_alloc_pci: 3037err_alloc_pci:
3038#ifdef VMXNET3_RSS
3039 kfree(adapter->rss_conf);
3040err_alloc_rss:
3041#endif
2505 kfree(adapter->pm_conf); 3042 kfree(adapter->pm_conf);
2506err_alloc_pm: 3043err_alloc_pm:
2507 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + 3044 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
2508 sizeof(struct Vmxnet3_RxQueueDesc), 3045 adapter->queue_desc_pa);
2509 adapter->tqd_start, adapter->queue_desc_pa);
2510err_alloc_queue_desc: 3046err_alloc_queue_desc:
2511 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3047 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2512 adapter->shared, adapter->shared_pa); 3048 adapter->shared, adapter->shared_pa);
@@ -2522,6 +3058,16 @@ vmxnet3_remove_device(struct pci_dev *pdev)
2522{ 3058{
2523 struct net_device *netdev = pci_get_drvdata(pdev); 3059 struct net_device *netdev = pci_get_drvdata(pdev);
2524 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3060 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3061 int size = 0;
3062 int num_rx_queues;
3063
3064#ifdef VMXNET3_RSS
3065 if (enable_mq)
3066 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3067 (int)num_online_cpus());
3068 else
3069#endif
3070 num_rx_queues = 1;
2525 3071
2526 flush_scheduled_work(); 3072 flush_scheduled_work();
2527 3073
@@ -2529,10 +3075,15 @@ vmxnet3_remove_device(struct pci_dev *pdev)
2529 3075
2530 vmxnet3_free_intr_resources(adapter); 3076 vmxnet3_free_intr_resources(adapter);
2531 vmxnet3_free_pci_resources(adapter); 3077 vmxnet3_free_pci_resources(adapter);
3078#ifdef VMXNET3_RSS
3079 kfree(adapter->rss_conf);
3080#endif
2532 kfree(adapter->pm_conf); 3081 kfree(adapter->pm_conf);
2533 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + 3082
2534 sizeof(struct Vmxnet3_RxQueueDesc), 3083 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2535 adapter->tqd_start, adapter->queue_desc_pa); 3084 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3085 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3086 adapter->queue_desc_pa);
2536 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3087 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2537 adapter->shared, adapter->shared_pa); 3088 adapter->shared, adapter->shared_pa);
2538 free_netdev(netdev); 3089 free_netdev(netdev);
@@ -2563,7 +3114,7 @@ vmxnet3_suspend(struct device *device)
2563 vmxnet3_free_intr_resources(adapter); 3114 vmxnet3_free_intr_resources(adapter);
2564 3115
2565 netif_device_detach(netdev); 3116 netif_device_detach(netdev);
2566 netif_stop_queue(netdev); 3117 netif_tx_stop_all_queues(netdev);
2567 3118
2568 /* Create wake-up filters. */ 3119 /* Create wake-up filters. */
2569 pmConf = adapter->pm_conf; 3120 pmConf = adapter->pm_conf;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b79070bcc92..8e17fc8a7fe 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -151,44 +151,42 @@ vmxnet3_get_stats(struct net_device *netdev)
151 struct UPT1_TxStats *devTxStats; 151 struct UPT1_TxStats *devTxStats;
152 struct UPT1_RxStats *devRxStats; 152 struct UPT1_RxStats *devRxStats;
153 struct net_device_stats *net_stats = &netdev->stats; 153 struct net_device_stats *net_stats = &netdev->stats;
154 int i;
154 155
155 adapter = netdev_priv(netdev); 156 adapter = netdev_priv(netdev);
156 157
157 /* Collect the dev stats into the shared area */ 158 /* Collect the dev stats into the shared area */
158 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 159 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
159 160
160 /* Assuming that we have a single queue device */
161 devTxStats = &adapter->tqd_start->stats;
162 devRxStats = &adapter->rqd_start->stats;
163
164 /* Get access to the driver stats per queue */
165 drvTxStats = &adapter->tx_queue.stats;
166 drvRxStats = &adapter->rx_queue.stats;
167
168 memset(net_stats, 0, sizeof(*net_stats)); 161 memset(net_stats, 0, sizeof(*net_stats));
162 for (i = 0; i < adapter->num_tx_queues; i++) {
163 devTxStats = &adapter->tqd_start[i].stats;
164 drvTxStats = &adapter->tx_queue[i].stats;
165 net_stats->tx_packets += devTxStats->ucastPktsTxOK +
166 devTxStats->mcastPktsTxOK +
167 devTxStats->bcastPktsTxOK;
168 net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
169 devTxStats->mcastBytesTxOK +
170 devTxStats->bcastBytesTxOK;
171 net_stats->tx_errors += devTxStats->pktsTxError;
172 net_stats->tx_dropped += drvTxStats->drop_total;
173 }
169 174
170 net_stats->rx_packets = devRxStats->ucastPktsRxOK + 175 for (i = 0; i < adapter->num_rx_queues; i++) {
171 devRxStats->mcastPktsRxOK + 176 devRxStats = &adapter->rqd_start[i].stats;
172 devRxStats->bcastPktsRxOK; 177 drvRxStats = &adapter->rx_queue[i].stats;
173 178 net_stats->rx_packets += devRxStats->ucastPktsRxOK +
174 net_stats->tx_packets = devTxStats->ucastPktsTxOK + 179 devRxStats->mcastPktsRxOK +
175 devTxStats->mcastPktsTxOK + 180 devRxStats->bcastPktsRxOK;
176 devTxStats->bcastPktsTxOK;
177
178 net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
179 devRxStats->mcastBytesRxOK +
180 devRxStats->bcastBytesRxOK;
181
182 net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
183 devTxStats->mcastBytesTxOK +
184 devTxStats->bcastBytesTxOK;
185 181
186 net_stats->rx_errors = devRxStats->pktsRxError; 182 net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
187 net_stats->tx_errors = devTxStats->pktsTxError; 183 devRxStats->mcastBytesRxOK +
188 net_stats->rx_dropped = drvRxStats->drop_total; 184 devRxStats->bcastBytesRxOK;
189 net_stats->tx_dropped = drvTxStats->drop_total;
190 net_stats->multicast = devRxStats->mcastPktsRxOK;
191 185
186 net_stats->rx_errors += devRxStats->pktsRxError;
187 net_stats->rx_dropped += drvRxStats->drop_total;
188 net_stats->multicast += devRxStats->mcastPktsRxOK;
189 }
192 return net_stats; 190 return net_stats;
193} 191}
194 192
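
The reworked vmxnet3_get_stats above walks every tx and rx queue and accumulates the device and driver counters into a single net_device_stats result instead of reading one queue. A rough userspace sketch of the same accumulation pattern, using simplified stand-in counter structs rather than the real UPT1 stats layout:

#include <stdio.h>

struct q_tx_stats { unsigned long long pkts_ok, bytes_ok, pkts_err, drops; };

struct net_totals { unsigned long long tx_packets, tx_bytes, tx_errors, tx_dropped; };

/* Sum per-queue counters into the device-wide totals. */
static void accumulate_tx(struct net_totals *t, const struct q_tx_stats *q, int nqueues)
{
        int i;

        for (i = 0; i < nqueues; i++) {
                t->tx_packets += q[i].pkts_ok;
                t->tx_bytes   += q[i].bytes_ok;
                t->tx_errors  += q[i].pkts_err;
                t->tx_dropped += q[i].drops;
        }
}

int main(void)
{
        struct q_tx_stats per_q[2] = { { 10, 1500, 0, 1 }, { 20, 3000, 2, 0 } };
        struct net_totals totals = { 0 };

        accumulate_tx(&totals, per_q, 2);
        printf("packets=%llu bytes=%llu errors=%llu dropped=%llu\n",
               totals.tx_packets, totals.tx_bytes, totals.tx_errors, totals.tx_dropped);
        return 0;
}
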
@@ -307,24 +305,26 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
307 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 305 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
308 u8 *base; 306 u8 *base;
309 int i; 307 int i;
308 int j = 0;
310 309
311 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 310 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
312 311
313 /* this does assume each counter is 64-bit wide */ 312 /* this does assume each counter is 64-bit wide */
313/* TODO change this for multiple queues */
314 314
315 base = (u8 *)&adapter->tqd_start->stats; 315 base = (u8 *)&adapter->tqd_start[j].stats;
316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) 316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); 317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
318 318
319 base = (u8 *)&adapter->tx_queue.stats; 319 base = (u8 *)&adapter->tx_queue[j].stats;
320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) 320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); 321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
322 322
323 base = (u8 *)&adapter->rqd_start->stats; 323 base = (u8 *)&adapter->rqd_start[j].stats;
324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) 324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); 325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
326 326
327 base = (u8 *)&adapter->rx_queue.stats; 327 base = (u8 *)&adapter->rx_queue[j].stats;
328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) 328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); 329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
330 330
@@ -339,6 +339,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
339{ 339{
340 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 340 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
341 u32 *buf = p; 341 u32 *buf = p;
342 int i = 0;
342 343
343 memset(p, 0, vmxnet3_get_regs_len(netdev)); 344 memset(p, 0, vmxnet3_get_regs_len(netdev));
344 345
@@ -347,28 +348,29 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
347 /* Update vmxnet3_get_regs_len if we want to dump more registers */ 348 /* Update vmxnet3_get_regs_len if we want to dump more registers */
348 349
349 /* make each ring use multiple of 16 bytes */ 350 /* make each ring use multiple of 16 bytes */
350 buf[0] = adapter->tx_queue.tx_ring.next2fill; 351/* TODO change this for multiple queues */
351 buf[1] = adapter->tx_queue.tx_ring.next2comp; 352 buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
352 buf[2] = adapter->tx_queue.tx_ring.gen; 353 buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
354 buf[2] = adapter->tx_queue[i].tx_ring.gen;
353 buf[3] = 0; 355 buf[3] = 0;
354 356
355 buf[4] = adapter->tx_queue.comp_ring.next2proc; 357 buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
356 buf[5] = adapter->tx_queue.comp_ring.gen; 358 buf[5] = adapter->tx_queue[i].comp_ring.gen;
357 buf[6] = adapter->tx_queue.stopped; 359 buf[6] = adapter->tx_queue[i].stopped;
358 buf[7] = 0; 360 buf[7] = 0;
359 361
360 buf[8] = adapter->rx_queue.rx_ring[0].next2fill; 362 buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
361 buf[9] = adapter->rx_queue.rx_ring[0].next2comp; 363 buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
362 buf[10] = adapter->rx_queue.rx_ring[0].gen; 364 buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
363 buf[11] = 0; 365 buf[11] = 0;
364 366
365 buf[12] = adapter->rx_queue.rx_ring[1].next2fill; 367 buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
366 buf[13] = adapter->rx_queue.rx_ring[1].next2comp; 368 buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
367 buf[14] = adapter->rx_queue.rx_ring[1].gen; 369 buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
368 buf[15] = 0; 370 buf[15] = 0;
369 371
370 buf[16] = adapter->rx_queue.comp_ring.next2proc; 372 buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
371 buf[17] = adapter->rx_queue.comp_ring.gen; 373 buf[17] = adapter->rx_queue[i].comp_ring.gen;
372 buf[18] = 0; 374 buf[18] = 0;
373 buf[19] = 0; 375 buf[19] = 0;
374} 376}
@@ -435,8 +437,10 @@ vmxnet3_get_ringparam(struct net_device *netdev,
435 param->rx_mini_max_pending = 0; 437 param->rx_mini_max_pending = 0;
436 param->rx_jumbo_max_pending = 0; 438 param->rx_jumbo_max_pending = 0;
437 439
438 param->rx_pending = adapter->rx_queue.rx_ring[0].size; 440 param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
439 param->tx_pending = adapter->tx_queue.tx_ring.size; 441 adapter->num_rx_queues;
442 param->tx_pending = adapter->tx_queue[0].tx_ring.size *
443 adapter->num_tx_queues;
440 param->rx_mini_pending = 0; 444 param->rx_mini_pending = 0;
441 param->rx_jumbo_pending = 0; 445 param->rx_jumbo_pending = 0;
442} 446}
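
With multiple queues, get_ringparam above reports the per-ring size multiplied by the queue count. A tiny illustration of that arithmetic; the 512-entry ring and 4 rx queues are example values, not numbers taken from the driver:

#include <stdio.h>

int main(void)
{
        unsigned int rx_ring_size = 512;   /* per-queue ring size (example) */
        unsigned int num_rx_queues = 4;

        /* the aggregate descriptor count reported back to ethtool */
        printf("rx_pending = %u\n", rx_ring_size * num_rx_queues);   /* 2048 */
        return 0;
}
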
@@ -480,8 +484,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
480 sz) != 0) 484 sz) != 0)
481 return -EINVAL; 485 return -EINVAL;
482 486
483 if (new_tx_ring_size == adapter->tx_queue.tx_ring.size && 487 if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
484 new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) { 488 new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
485 return 0; 489 return 0;
486 } 490 }
487 491
@@ -498,11 +502,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
498 502
499 /* recreate the rx queue and the tx queue based on the 503 /* recreate the rx queue and the tx queue based on the
500 * new sizes */ 504 * new sizes */
501 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 505 vmxnet3_tq_destroy_all(adapter);
502 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 506 vmxnet3_rq_destroy_all(adapter);
503 507
504 err = vmxnet3_create_queues(adapter, new_tx_ring_size, 508 err = vmxnet3_create_queues(adapter, new_tx_ring_size,
505 new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); 509 new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
510
506 if (err) { 511 if (err) {
507 /* failed, most likely because of OOM, try default 512 /* failed, most likely because of OOM, try default
508 * size */ 513 * size */
@@ -535,6 +540,66 @@ out:
535} 540}
536 541
537 542
543static int
544vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
545 void *rules)
546{
547 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
548 switch (info->cmd) {
549 case ETHTOOL_GRXRINGS:
550 info->data = adapter->num_rx_queues;
551 return 0;
552 }
553 return -EOPNOTSUPP;
554}
555
556#ifdef VMXNET3_RSS
557static int
558vmxnet3_get_rss_indir(struct net_device *netdev,
559 struct ethtool_rxfh_indir *p)
560{
561 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
562 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
563 unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
564
565 p->size = rssConf->indTableSize;
566 while (n--)
567 p->ring_index[n] = rssConf->indTable[n];
568 return 0;
569
570}
571
572static int
573vmxnet3_set_rss_indir(struct net_device *netdev,
574 const struct ethtool_rxfh_indir *p)
575{
576 unsigned int i;
577 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
578 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
579
580 if (p->size != rssConf->indTableSize)
581 return -EINVAL;
582 for (i = 0; i < rssConf->indTableSize; i++) {
583 /*
584 * Return with error code if any of the queue indices
585 * is out of range
586 */
587 if (p->ring_index[i] < 0 ||
588 p->ring_index[i] >= adapter->num_rx_queues)
589 return -EINVAL;
590 }
591
592 for (i = 0; i < rssConf->indTableSize; i++)
593 rssConf->indTable[i] = p->ring_index[i];
594
595 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
596 VMXNET3_CMD_UPDATE_RSSIDT);
597
598 return 0;
599
600}
601#endif
602
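
The get/set handlers above expose the RSS indirection table: each of the indTableSize entries names the rx queue a hash bucket maps to, and the setter rejects any entry outside [0, num_rx_queues). A standalone sketch of that validate-and-copy step; the table size and struct below are hypothetical, not the real UPT1_RSSConf layout:

#include <stdio.h>
#include <string.h>

#define IND_TABLE_SIZE 32   /* hypothetical; the device reports its own size */

struct rss_conf {
        unsigned int ind_table_size;
        unsigned char ind_table[IND_TABLE_SIZE];
};

/* Validate and install a new indirection table; returns 0, or -1 on bad input. */
static int set_rss_indir(struct rss_conf *conf, const unsigned char *table,
                         unsigned int table_size, unsigned int num_rx_queues)
{
        unsigned int i;

        if (table_size != conf->ind_table_size)
                return -1;
        for (i = 0; i < table_size; i++)
                if (table[i] >= num_rx_queues)
                        return -1;      /* entry points at a queue that does not exist */
        memcpy(conf->ind_table, table, table_size);
        return 0;
}

int main(void)
{
        struct rss_conf conf = { .ind_table_size = IND_TABLE_SIZE };
        unsigned char table[IND_TABLE_SIZE];
        unsigned int i;

        /* spread hash buckets round-robin over 4 rx queues */
        for (i = 0; i < IND_TABLE_SIZE; i++)
                table[i] = i % 4;

        printf("set_rss_indir: %d\n", set_rss_indir(&conf, table, IND_TABLE_SIZE, 4));
        return 0;
}

Userspace would normally drive these hooks through ethtool's rx flow hash indirection commands (ethtool -x / -X on builds new enough to support them).
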
538static struct ethtool_ops vmxnet3_ethtool_ops = { 603static struct ethtool_ops vmxnet3_ethtool_ops = {
539 .get_settings = vmxnet3_get_settings, 604 .get_settings = vmxnet3_get_settings,
540 .get_drvinfo = vmxnet3_get_drvinfo, 605 .get_drvinfo = vmxnet3_get_drvinfo,
@@ -558,6 +623,11 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
558 .get_ethtool_stats = vmxnet3_get_ethtool_stats, 623 .get_ethtool_stats = vmxnet3_get_ethtool_stats,
559 .get_ringparam = vmxnet3_get_ringparam, 624 .get_ringparam = vmxnet3_get_ringparam,
560 .set_ringparam = vmxnet3_set_ringparam, 625 .set_ringparam = vmxnet3_set_ringparam,
626 .get_rxnfc = vmxnet3_get_rxnfc,
627#ifdef VMXNET3_RSS
628 .get_rxfh_indir = vmxnet3_get_rss_indir,
629 .set_rxfh_indir = vmxnet3_set_rss_indir,
630#endif
561}; 631};
562 632
563void vmxnet3_set_ethtool_ops(struct net_device *netdev) 633void vmxnet3_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index edf228843af..7fadeed37f0 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,11 +68,15 @@
68/* 68/*
69 * Version numbers 69 * Version numbers
70 */ 70 */
71#define VMXNET3_DRIVER_VERSION_STRING "1.0.14.0-k" 71#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k"
72 72
 73/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 73/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
74#define VMXNET3_DRIVER_VERSION_NUM 0x01000E00 74#define VMXNET3_DRIVER_VERSION_NUM 0x01001000
75 75
76#if defined(CONFIG_PCI_MSI)
77 /* RSS only makes sense if MSI-X is supported. */
78 #define VMXNET3_RSS
79#endif
76 80
77/* 81/*
78 * Capabilities 82 * Capabilities
@@ -218,16 +222,19 @@ struct vmxnet3_tx_ctx {
218}; 222};
219 223
220struct vmxnet3_tx_queue { 224struct vmxnet3_tx_queue {
225 char name[IFNAMSIZ+8]; /* To identify interrupt */
226 struct vmxnet3_adapter *adapter;
221 spinlock_t tx_lock; 227 spinlock_t tx_lock;
222 struct vmxnet3_cmd_ring tx_ring; 228 struct vmxnet3_cmd_ring tx_ring;
223 struct vmxnet3_tx_buf_info *buf_info; 229 struct vmxnet3_tx_buf_info *buf_info;
224 struct vmxnet3_tx_data_ring data_ring; 230 struct vmxnet3_tx_data_ring data_ring;
225 struct vmxnet3_comp_ring comp_ring; 231 struct vmxnet3_comp_ring comp_ring;
226 struct Vmxnet3_TxQueueCtrl *shared; 232 struct Vmxnet3_TxQueueCtrl *shared;
227 struct vmxnet3_tq_driver_stats stats; 233 struct vmxnet3_tq_driver_stats stats;
228 bool stopped; 234 bool stopped;
229 int num_stop; /* # of times the queue is 235 int num_stop; /* # of times the queue is
230 * stopped */ 236 * stopped */
237 int qid;
231} __attribute__((__aligned__(SMP_CACHE_BYTES))); 238} __attribute__((__aligned__(SMP_CACHE_BYTES)));
232 239
233enum vmxnet3_rx_buf_type { 240enum vmxnet3_rx_buf_type {
@@ -259,6 +266,9 @@ struct vmxnet3_rq_driver_stats {
259}; 266};
260 267
261struct vmxnet3_rx_queue { 268struct vmxnet3_rx_queue {
269 char name[IFNAMSIZ + 8]; /* To identify interrupt */
270 struct vmxnet3_adapter *adapter;
271 struct napi_struct napi;
262 struct vmxnet3_cmd_ring rx_ring[2]; 272 struct vmxnet3_cmd_ring rx_ring[2];
263 struct vmxnet3_comp_ring comp_ring; 273 struct vmxnet3_comp_ring comp_ring;
264 struct vmxnet3_rx_ctx rx_ctx; 274 struct vmxnet3_rx_ctx rx_ctx;
@@ -271,7 +281,16 @@ struct vmxnet3_rx_queue {
271 struct vmxnet3_rq_driver_stats stats; 281 struct vmxnet3_rq_driver_stats stats;
272} __attribute__((__aligned__(SMP_CACHE_BYTES))); 282} __attribute__((__aligned__(SMP_CACHE_BYTES)));
273 283
274#define VMXNET3_LINUX_MAX_MSIX_VECT 1 284#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
285#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */
286
287/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
288#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
289
290#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
291 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
292#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */
293
275 294
276struct vmxnet3_intr { 295struct vmxnet3_intr {
277 enum vmxnet3_intr_mask_mode mask_mode; 296 enum vmxnet3_intr_mask_mode mask_mode;
@@ -279,27 +298,32 @@ struct vmxnet3_intr {
279 u8 num_intrs; /* # of intr vectors */ 298 u8 num_intrs; /* # of intr vectors */
280 u8 event_intr_idx; /* idx of the intr vector for event */ 299 u8 event_intr_idx; /* idx of the intr vector for event */
281 u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */ 300 u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
301 char event_msi_vector_name[IFNAMSIZ+11];
282#ifdef CONFIG_PCI_MSI 302#ifdef CONFIG_PCI_MSI
283 struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT]; 303 struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
284#endif 304#endif
285}; 305};
286 306
307/* Interrupt sharing schemes, share_intr */
308#define VMXNET3_INTR_BUDDYSHARE 0 /* Corresponding tx,rx queues share irq */
309#define VMXNET3_INTR_TXSHARE 1 /* All tx queues share one irq */
310#define VMXNET3_INTR_DONTSHARE 2 /* each queue has its own irq */
311
312
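
The new constants above budget one MSI-X vector per possible tx queue and rx queue plus one for events (8 + 8 + 1 = 17, with a floor of 3), and name three irq sharing schemes. A rough standalone sketch of how many vectors each scheme would ask for; the vectors_needed() helper and its per-scheme formulas are illustrative guesses based on the comments, not the driver's actual allocation code:

#include <stdio.h>

#define MAX_TX_QUEUES 8
#define MAX_RX_QUEUES 8
#define MAX_MSIX_VECT (MAX_TX_QUEUES + MAX_RX_QUEUES + 1)   /* 17 */
#define MIN_MSIX_VECT 3                                      /* tx + rx + event */

enum share_intr { INTR_BUDDYSHARE, INTR_TXSHARE, INTR_DONTSHARE };

/* Hypothetical helper: vectors requested for a queue count and sharing scheme. */
static int vectors_needed(int ntx, int nrx, enum share_intr scheme)
{
        switch (scheme) {
        case INTR_BUDDYSHARE:   /* tx queue i shares the irq of rx queue i */
                return nrx + 1;
        case INTR_TXSHARE:      /* all tx queues share a single irq */
                return 1 + nrx + 1;
        default:                /* every queue gets its own irq */
                return ntx + nrx + 1;
        }
}

int main(void)
{
        printf("max=%d min=%d buddy=%d txshare=%d dontshare=%d\n",
               MAX_MSIX_VECT, MIN_MSIX_VECT,
               vectors_needed(4, 4, INTR_BUDDYSHARE),
               vectors_needed(4, 4, INTR_TXSHARE),
               vectors_needed(4, 4, INTR_DONTSHARE));
        return 0;
}
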
287#define VMXNET3_STATE_BIT_RESETTING 0 313#define VMXNET3_STATE_BIT_RESETTING 0
288#define VMXNET3_STATE_BIT_QUIESCED 1 314#define VMXNET3_STATE_BIT_QUIESCED 1
289struct vmxnet3_adapter { 315struct vmxnet3_adapter {
290 struct vmxnet3_tx_queue tx_queue; 316 struct vmxnet3_tx_queue tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
291 struct vmxnet3_rx_queue rx_queue; 317 struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
292 struct napi_struct napi; 318 struct vlan_group *vlan_grp;
293 struct vlan_group *vlan_grp; 319 struct vmxnet3_intr intr;
294 320 struct Vmxnet3_DriverShared *shared;
295 struct vmxnet3_intr intr; 321 struct Vmxnet3_PMConf *pm_conf;
296 322 struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
297 struct Vmxnet3_DriverShared *shared; 323 struct Vmxnet3_RxQueueDesc *rqd_start; /* all rx queue desc */
298 struct Vmxnet3_PMConf *pm_conf; 324 struct net_device *netdev;
299 struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */ 325 struct net_device_stats net_stats;
300 struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */ 326 struct pci_dev *pdev;
301 struct net_device *netdev;
302 struct pci_dev *pdev;
303 327
304 u8 __iomem *hw_addr0; /* for BAR 0 */ 328 u8 __iomem *hw_addr0; /* for BAR 0 */
305 u8 __iomem *hw_addr1; /* for BAR 1 */ 329 u8 __iomem *hw_addr1; /* for BAR 1 */
@@ -308,6 +332,12 @@ struct vmxnet3_adapter {
308 bool rxcsum; 332 bool rxcsum;
309 bool lro; 333 bool lro;
310 bool jumbo_frame; 334 bool jumbo_frame;
335#ifdef VMXNET3_RSS
336 struct UPT1_RSSConf *rss_conf;
337 bool rss;
338#endif
339 u32 num_rx_queues;
340 u32 num_tx_queues;
311 341
312 /* rx buffer related */ 342 /* rx buffer related */
313 unsigned skb_buf_size; 343 unsigned skb_buf_size;
@@ -327,6 +357,7 @@ struct vmxnet3_adapter {
327 unsigned long state; /* VMXNET3_STATE_BIT_xxx */ 357 unsigned long state; /* VMXNET3_STATE_BIT_xxx */
328 358
329 int dev_number; 359 int dev_number;
360 int share_intr;
330}; 361};
331 362
332#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ 363#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -366,12 +397,10 @@ void
366vmxnet3_reset_dev(struct vmxnet3_adapter *adapter); 397vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
367 398
368void 399void
369vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, 400vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
370 struct vmxnet3_adapter *adapter);
371 401
372void 402void
373vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, 403vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
374 struct vmxnet3_adapter *adapter);
375 404
376int 405int
377vmxnet3_create_queues(struct vmxnet3_adapter *adapter, 406vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 906a3ca3676..a0241fe72d8 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -19,19 +19,7 @@
19 19
20#include "vxge-traffic.h" 20#include "vxge-traffic.h"
21#include "vxge-config.h" 21#include "vxge-config.h"
22 22#include "vxge-main.h"
23static enum vxge_hw_status
24__vxge_hw_fifo_create(
25 struct __vxge_hw_vpath_handle *vpath_handle,
26 struct vxge_hw_fifo_attr *attr);
27
28static enum vxge_hw_status
29__vxge_hw_fifo_abort(
30 struct __vxge_hw_fifo *fifoh);
31
32static enum vxge_hw_status
33__vxge_hw_fifo_reset(
34 struct __vxge_hw_fifo *ringh);
35 23
36static enum vxge_hw_status 24static enum vxge_hw_status
37__vxge_hw_fifo_delete( 25__vxge_hw_fifo_delete(
@@ -71,53 +59,15 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
71 u32 size, 59 u32 size,
72 struct vxge_hw_mempool_dma *dma_object); 60 struct vxge_hw_mempool_dma *dma_object);
73 61
74
75static struct __vxge_hw_channel*
76__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
77 enum __vxge_hw_channel_type type, u32 length,
78 u32 per_dtr_space, void *userdata);
79
80static void 62static void
81__vxge_hw_channel_free( 63__vxge_hw_channel_free(
82 struct __vxge_hw_channel *channel); 64 struct __vxge_hw_channel *channel);
83 65
84static enum vxge_hw_status
85__vxge_hw_channel_initialize(
86 struct __vxge_hw_channel *channel);
87
88static enum vxge_hw_status
89__vxge_hw_channel_reset(
90 struct __vxge_hw_channel *channel);
91
92static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp); 66static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
93 67
94static enum vxge_hw_status 68static enum vxge_hw_status
95__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
96
97static enum vxge_hw_status
98__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config); 69__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
99 70
100static void
101__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
102
103static void
104__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
105
106static enum vxge_hw_status
107__vxge_hw_vpath_card_info_get(
108 u32 vp_id,
109 struct vxge_hw_vpath_reg __iomem *vpath_reg,
110 struct vxge_hw_device_hw_info *hw_info);
111
112static enum vxge_hw_status
113__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
114
115static void
116__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
117
118static enum vxge_hw_status
119__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
120
121static enum vxge_hw_status 71static enum vxge_hw_status
122__vxge_hw_device_register_poll( 72__vxge_hw_device_register_poll(
123 void __iomem *reg, 73 void __iomem *reg,
@@ -138,9 +88,10 @@ __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
138 88
139static struct vxge_hw_mempool* 89static struct vxge_hw_mempool*
140__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size, 90__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
141 u32 item_size, u32 private_size, u32 items_initial, 91 u32 item_size, u32 private_size, u32 items_initial,
142 u32 items_max, struct vxge_hw_mempool_cbs *mp_callback, 92 u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
143 void *userdata); 93 void *userdata);
94
144static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool); 95static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
145 96
146static enum vxge_hw_status 97static enum vxge_hw_status
@@ -153,52 +104,353 @@ vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
153static enum vxge_hw_status 104static enum vxge_hw_status
154__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg); 105__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
155 106
156static u64 107static void
157__vxge_hw_vpath_pci_func_mode_get(u32 vp_id, 108__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
158 struct vxge_hw_vpath_reg __iomem *vpath_reg);
159
160static u32
161__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
162 109
163static enum vxge_hw_status 110static enum vxge_hw_status
164__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, 111__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
165 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]); 112 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
166 113
167static enum vxge_hw_status 114static enum vxge_hw_status
168__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath); 115__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
116 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
169 117
118static void
119vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
120{
121 u64 val64;
170 122
171static enum vxge_hw_status 123 val64 = readq(&vp_reg->rxmac_vcfg0);
172__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id); 124 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
125 writeq(val64, &vp_reg->rxmac_vcfg0);
126 val64 = readq(&vp_reg->rxmac_vcfg0);
173 127
174static enum vxge_hw_status 128 return;
175__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, 129}
176 struct vxge_hw_device_hw_info *hw_info);
177 130
178static enum vxge_hw_status 131/*
179__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id); 132 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
133 */
134int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
135{
136 struct vxge_hw_vpath_reg __iomem *vp_reg;
137 struct __vxge_hw_virtualpath *vpath;
138 u64 val64, rxd_count, rxd_spat;
139 int count = 0, total_count = 0;
180 140
181static void 141 vpath = &hldev->virtual_paths[vp_id];
182__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id); 142 vp_reg = vpath->vp_reg;
183 143
184static enum vxge_hw_status 144 vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
185__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
186 u32 operation, u32 offset, u64 *stat);
187 145
188static enum vxge_hw_status 146 /* Check that the ring controller for this vpath has enough free RxDs
189__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, 147 * to send frames to the host. This is done by reading the
190 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats); 148 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
149 * RXD_SPAT value for the vpath.
150 */
151 val64 = readq(&vp_reg->prc_cfg6);
152 rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
153 /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
154 * leg room.
155 */
156 rxd_spat *= 2;
157
158 do {
159 mdelay(1);
160
161 rxd_count = readq(&vp_reg->prc_rxd_doorbell);
162
163 /* Check that the ring controller for this vpath does
164 * not have any frame in its pipeline.
165 */
166 val64 = readq(&vp_reg->frm_in_progress_cnt);
167 if ((rxd_count <= rxd_spat) || (val64 > 0))
168 count = 0;
169 else
170 count++;
171 total_count++;
172 } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
173 (total_count < VXGE_HW_MAX_POLLING_COUNT));
174
175 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
176 printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
177 __func__);
178
179 return total_count;
180}
181
182/* vxge_hw_device_wait_receive_idle - This function waits until all frames
183 * stored in the frame buffer for each vpath assigned to the given
184 * function (hldev) have been sent to the host.
185 */
186void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
187{
188 int i, total_count = 0;
189
190 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
191 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
192 continue;
193
194 total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
195 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
196 break;
197 }
198}
191 199
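
vxge_hw_vpath_wait_receive_idle above only declares the path idle after VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT consecutive quiet samples, restarting the streak whenever activity is seen and giving up at VXGE_HW_MAX_POLLING_COUNT iterations. A generic standalone sketch of that pattern, with a stand-in is_idle() probe in place of the doorbell and frame-count registers:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for a hardware probe; here it reports idle from iteration 5 on. */
static bool is_idle(int iteration)
{
        return iteration >= 5;
}

/* Wait until 'needed' consecutive idle samples are seen, or give up. */
static int wait_idle(int needed, int max_polls)
{
        int streak = 0, total = 0;

        while (streak < needed && total < max_polls) {
                /* a real driver would mdelay()/msleep() between samples */
                if (is_idle(total))
                        streak++;
                else
                        streak = 0;     /* activity observed: restart the streak */
                total++;
        }
        return total;   /* polls used (== max_polls on timeout) */
}

int main(void)
{
        printf("polls used: %d\n", wait_idle(3, 100));
        return 0;
}
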
192static enum vxge_hw_status 200static enum vxge_hw_status
193__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, 201vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
194 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats); 202 u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
203 u64 *steer_ctrl)
204{
205 struct vxge_hw_vpath_reg __iomem *vp_reg;
206 enum vxge_hw_status status;
207 u64 val64;
208 u32 retry = 0, max_retry = 100;
209
210 vp_reg = vpath->vp_reg;
211
212 if (vpath->vp_open) {
213 max_retry = 3;
214 spin_lock(&vpath->lock);
215 }
216
217 writeq(*data0, &vp_reg->rts_access_steer_data0);
218 writeq(*data1, &vp_reg->rts_access_steer_data1);
219 wmb();
220
221 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
222 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
223 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
224 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
225 *steer_ctrl;
226
227 status = __vxge_hw_pio_mem_write64(val64,
228 &vp_reg->rts_access_steer_ctrl,
229 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
230 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
231
232 /* The __vxge_hw_device_register_poll can udelay for a significant
 233 * amount of time, blocking other processes from the CPU. If it delays
 234 * for ~5secs, a NMI error can occur. A way around this is to give up
 235 * the processor via msleep, but this is not allowed if under lock.
236 * So, only allow it to sleep for ~4secs if open. Otherwise, delay for
237 * 1sec and sleep for 10ms until the firmware operation has completed
238 * or timed-out.
239 */
240 while ((status != VXGE_HW_OK) && retry++ < max_retry) {
241 if (!vpath->vp_open)
242 msleep(20);
243 status = __vxge_hw_device_register_poll(
244 &vp_reg->rts_access_steer_ctrl,
245 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
246 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
247 }
248
249 if (status != VXGE_HW_OK)
250 goto out;
251
252 val64 = readq(&vp_reg->rts_access_steer_ctrl);
253 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
254 *data0 = readq(&vp_reg->rts_access_steer_data0);
255 *data1 = readq(&vp_reg->rts_access_steer_data1);
256 *steer_ctrl = val64;
257 } else
258 status = VXGE_HW_FAIL;
259
260out:
261 if (vpath->vp_open)
262 spin_unlock(&vpath->lock);
263 return status;
264}
265
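
vxge_hw_vpath_fw_api above adapts its retry policy to the locking context: with the vpath open it holds a spinlock, so it cannot sleep and keeps the retry budget small, otherwise it sleeps between polls and retries for much longer. A simplified sketch of that decision; poll_done() is a hypothetical stand-in for the register poll:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical probe: has the firmware command completed yet? */
static bool poll_done(int attempt)
{
        return attempt >= 2;
}

/* Retry a firmware command, with a budget chosen by whether we may sleep. */
static bool issue_cmd(bool may_sleep)
{
        /* Under a spinlock we must not sleep, so keep the retry budget small;
         * otherwise retry far longer and sleep between attempts. */
        int max_retry = may_sleep ? 100 : 3;
        int attempt;

        for (attempt = 0; attempt < max_retry; attempt++) {
                if (poll_done(attempt))
                        return true;
                /* a real driver would msleep() here when may_sleep is true */
        }
        return false;
}

int main(void)
{
        printf("locked: %d, unlocked: %d\n", issue_cmd(false), issue_cmd(true));
        return 0;
}
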
266enum vxge_hw_status
267vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
268 u32 *minor, u32 *build)
269{
270 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
271 struct __vxge_hw_virtualpath *vpath;
272 enum vxge_hw_status status;
273
274 vpath = &hldev->virtual_paths[hldev->first_vp_id];
275
276 status = vxge_hw_vpath_fw_api(vpath,
277 VXGE_HW_FW_UPGRADE_ACTION,
278 VXGE_HW_FW_UPGRADE_MEMO,
279 VXGE_HW_FW_UPGRADE_OFFSET_READ,
280 &data0, &data1, &steer_ctrl);
281 if (status != VXGE_HW_OK)
282 return status;
283
284 *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
285 *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
286 *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
287
288 return status;
289}
290
291enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
292{
293 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
294 struct __vxge_hw_virtualpath *vpath;
295 enum vxge_hw_status status;
296 u32 ret;
297
298 vpath = &hldev->virtual_paths[hldev->first_vp_id];
299
300 status = vxge_hw_vpath_fw_api(vpath,
301 VXGE_HW_FW_UPGRADE_ACTION,
302 VXGE_HW_FW_UPGRADE_MEMO,
303 VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
304 &data0, &data1, &steer_ctrl);
305 if (status != VXGE_HW_OK) {
306 vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
307 goto exit;
308 }
309
310 ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
311 if (ret != 1) {
312 vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
313 __func__, ret);
314 status = VXGE_HW_FAIL;
315 }
316
317exit:
318 return status;
319}
320
321enum vxge_hw_status
322vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
323{
324 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
325 struct __vxge_hw_virtualpath *vpath;
326 enum vxge_hw_status status;
327 int ret_code, sec_code;
328
329 vpath = &hldev->virtual_paths[hldev->first_vp_id];
330
331 /* send upgrade start command */
332 status = vxge_hw_vpath_fw_api(vpath,
333 VXGE_HW_FW_UPGRADE_ACTION,
334 VXGE_HW_FW_UPGRADE_MEMO,
335 VXGE_HW_FW_UPGRADE_OFFSET_START,
336 &data0, &data1, &steer_ctrl);
337 if (status != VXGE_HW_OK) {
338 vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
339 __func__);
340 return status;
341 }
342
343 /* Transfer fw image to adapter 16 bytes at a time */
344 for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
345 steer_ctrl = 0;
346
 347 /* The next 128 bits of fwdata to be loaded onto the adapter */
348 data0 = *((u64 *)fwdata);
349 data1 = *((u64 *)fwdata + 1);
350
351 status = vxge_hw_vpath_fw_api(vpath,
352 VXGE_HW_FW_UPGRADE_ACTION,
353 VXGE_HW_FW_UPGRADE_MEMO,
354 VXGE_HW_FW_UPGRADE_OFFSET_SEND,
355 &data0, &data1, &steer_ctrl);
356 if (status != VXGE_HW_OK) {
357 vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
358 __func__);
359 goto out;
360 }
361
362 ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
363 switch (ret_code) {
364 case VXGE_HW_FW_UPGRADE_OK:
365 /* All OK, send next 16 bytes. */
366 break;
367 case VXGE_FW_UPGRADE_BYTES2SKIP:
368 /* skip bytes in the stream */
369 fwdata += (data0 >> 8) & 0xFFFFFFFF;
370 break;
371 case VXGE_HW_FW_UPGRADE_DONE:
372 goto out;
373 case VXGE_HW_FW_UPGRADE_ERR:
374 sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
375 switch (sec_code) {
376 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
377 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
378 printk(KERN_ERR
379 "corrupted data from .ncf file\n");
380 break;
381 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
382 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
383 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
384 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
385 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
386 printk(KERN_ERR "invalid .ncf file\n");
387 break;
388 case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
389 printk(KERN_ERR "buffer overflow\n");
390 break;
391 case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
392 printk(KERN_ERR "failed to flash the image\n");
393 break;
394 case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
395 printk(KERN_ERR
396 "generic error. Unknown error type\n");
397 break;
398 default:
399 printk(KERN_ERR "Unknown error of type %d\n",
400 sec_code);
401 break;
402 }
403 status = VXGE_HW_FAIL;
404 goto out;
405 default:
406 printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
407 status = VXGE_HW_FAIL;
408 goto out;
409 }
410 /* point to next 16 bytes */
411 fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
412 }
413out:
414 return status;
415}
416
417enum vxge_hw_status
418vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
419 struct eprom_image *img)
420{
421 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
422 struct __vxge_hw_virtualpath *vpath;
423 enum vxge_hw_status status;
424 int i;
425
426 vpath = &hldev->virtual_paths[hldev->first_vp_id];
427
428 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
429 data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
430 data1 = steer_ctrl = 0;
431
432 status = vxge_hw_vpath_fw_api(vpath,
433 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
434 VXGE_HW_FW_API_GET_EPROM_REV,
435 0, &data0, &data1, &steer_ctrl);
436 if (status != VXGE_HW_OK)
437 break;
438
439 img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
440 img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
441 img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
442 img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
443 }
444
445 return status;
446}
195 447
196/* 448/*
197 * __vxge_hw_channel_allocate - Allocate memory for channel 449 * __vxge_hw_channel_allocate - Allocate memory for channel
198 * This function allocates required memory for the channel and various arrays 450 * This function allocates required memory for the channel and various arrays
199 * in the channel 451 * in the channel
200 */ 452 */
201struct __vxge_hw_channel* 453static struct __vxge_hw_channel *
202__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, 454__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
203 enum __vxge_hw_channel_type type, 455 enum __vxge_hw_channel_type type,
204 u32 length, u32 per_dtr_space, void *userdata) 456 u32 length, u32 per_dtr_space, void *userdata)
@@ -269,7 +521,7 @@ exit0:
269 * This function deallocates memory from the channel and various arrays 521 * This function deallocates memory from the channel and various arrays
270 * in the channel 522 * in the channel
271 */ 523 */
272void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) 524static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
273{ 525{
274 kfree(channel->work_arr); 526 kfree(channel->work_arr);
275 kfree(channel->free_arr); 527 kfree(channel->free_arr);
@@ -283,7 +535,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
283 * This function initializes a channel by properly setting the 535 * This function initializes a channel by properly setting the
284 * various references 536 * various references
285 */ 537 */
286enum vxge_hw_status 538static enum vxge_hw_status
287__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) 539__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
288{ 540{
289 u32 i; 541 u32 i;
@@ -318,7 +570,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
318 * __vxge_hw_channel_reset - Resets a channel 570 * __vxge_hw_channel_reset - Resets a channel
319 * This function resets a channel by properly setting the various references 571 * This function resets a channel by properly setting the various references
320 */ 572 */
321enum vxge_hw_status 573static enum vxge_hw_status
322__vxge_hw_channel_reset(struct __vxge_hw_channel *channel) 574__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
323{ 575{
324 u32 i; 576 u32 i;
@@ -345,8 +597,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
345 * Initialize certain PCI/PCI-X configuration registers 597 * Initialize certain PCI/PCI-X configuration registers
346 * with recommended values. Save config space for future hw resets. 598 * with recommended values. Save config space for future hw resets.
347 */ 599 */
348void 600static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
349__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
350{ 601{
351 u16 cmd = 0; 602 u16 cmd = 0;
352 603
@@ -390,7 +641,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
390 return ret; 641 return ret;
391} 642}
392 643
393 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset 644/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
394 * in progress 645 * in progress
 395 * This routine checks whether the vpath reset-in-progress register has turned zero 646 * This routine checks whether the vpath reset-in-progress register has turned zero
396 */ 647 */
@@ -435,7 +686,7 @@ exit:
435 * register location pointers in the device object. It waits until the ric is 686 * register location pointers in the device object. It waits until the ric is
436 * completed initializing registers. 687 * completed initializing registers.
437 */ 688 */
438enum vxge_hw_status 689static enum vxge_hw_status
439__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) 690__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
440{ 691{
441 u64 val64; 692 u64 val64;
@@ -496,26 +747,6 @@ exit:
496} 747}
497 748
498/* 749/*
499 * __vxge_hw_device_id_get
 500 * This routine sets the device id and revision numbers into the device
501 * structure
502 */
503void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
504{
505 u64 val64;
506
507 val64 = readq(&hldev->common_reg->titan_asic_id);
508 hldev->device_id =
509 (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
510
511 hldev->major_revision =
512 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
513
514 hldev->minor_revision =
515 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
516}
517
518/*
519 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver 750 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
520 * This routine returns the Access Rights of the driver 751 * This routine returns the Access Rights of the driver
521 */ 752 */
@@ -568,10 +799,25 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
568} 799}
569 800
570/* 801/*
802 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
803 * Returns the function number of the vpath.
804 */
805static u32
806__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
807{
808 u64 val64;
809
810 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
811
812 return
813 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
814}
815
816/*
571 * __vxge_hw_device_host_info_get 817 * __vxge_hw_device_host_info_get
572 * This routine returns the host type assignments 818 * This routine returns the host type assignments
573 */ 819 */
574void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) 820static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
575{ 821{
576 u64 val64; 822 u64 val64;
577 u32 i; 823 u32 i;
@@ -584,16 +830,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
584 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); 830 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
585 831
586 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 832 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
587
588 if (!(hldev->vpath_assignments & vxge_mBIT(i))) 833 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
589 continue; 834 continue;
590 835
591 hldev->func_id = 836 hldev->func_id =
592 __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]); 837 __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
593 838
594 hldev->access_rights = __vxge_hw_device_access_rights_get( 839 hldev->access_rights = __vxge_hw_device_access_rights_get(
595 hldev->host_type, hldev->func_id); 840 hldev->host_type, hldev->func_id);
596 841
842 hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
843 hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
844
597 hldev->first_vp_id = i; 845 hldev->first_vp_id = i;
598 break; 846 break;
599 } 847 }
@@ -634,7 +882,8 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
634 * __vxge_hw_device_initialize 882 * __vxge_hw_device_initialize
635 * Initialize Titan-V hardware. 883 * Initialize Titan-V hardware.
636 */ 884 */
637enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) 885static enum vxge_hw_status
886__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
638{ 887{
639 enum vxge_hw_status status = VXGE_HW_OK; 888 enum vxge_hw_status status = VXGE_HW_OK;
640 889
@@ -650,6 +899,196 @@ exit:
650 return status; 899 return status;
651} 900}
652 901
902/*
903 * __vxge_hw_vpath_fw_ver_get - Get the fw version
904 * Returns FW Version
905 */
906static enum vxge_hw_status
907__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
908 struct vxge_hw_device_hw_info *hw_info)
909{
910 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
911 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
912 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
913 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
914 u64 data0, data1 = 0, steer_ctrl = 0;
915 enum vxge_hw_status status;
916
917 status = vxge_hw_vpath_fw_api(vpath,
918 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
919 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
920 0, &data0, &data1, &steer_ctrl);
921 if (status != VXGE_HW_OK)
922 goto exit;
923
924 fw_date->day =
925 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
926 fw_date->month =
927 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
928 fw_date->year =
929 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
930
931 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
932 fw_date->month, fw_date->day, fw_date->year);
933
934 fw_version->major =
935 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
936 fw_version->minor =
937 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
938 fw_version->build =
939 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
940
941 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
942 fw_version->major, fw_version->minor, fw_version->build);
943
944 flash_date->day =
945 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
946 flash_date->month =
947 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
948 flash_date->year =
949 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
950
951 snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
952 flash_date->month, flash_date->day, flash_date->year);
953
954 flash_version->major =
955 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
956 flash_version->minor =
957 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
958 flash_version->build =
959 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
960
961 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
962 flash_version->major, flash_version->minor,
963 flash_version->build);
964
965exit:
966 return status;
967}
968
969/*
970 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
971 * part number and product description.
972 */
973static enum vxge_hw_status
974__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
975 struct vxge_hw_device_hw_info *hw_info)
976{
977 enum vxge_hw_status status;
978 u64 data0, data1 = 0, steer_ctrl = 0;
979 u8 *serial_number = hw_info->serial_number;
980 u8 *part_number = hw_info->part_number;
981 u8 *product_desc = hw_info->product_desc;
982 u32 i, j = 0;
983
984 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
985
986 status = vxge_hw_vpath_fw_api(vpath,
987 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
988 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
989 0, &data0, &data1, &steer_ctrl);
990 if (status != VXGE_HW_OK)
991 return status;
992
993 ((u64 *)serial_number)[0] = be64_to_cpu(data0);
994 ((u64 *)serial_number)[1] = be64_to_cpu(data1);
995
996 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
997 data1 = steer_ctrl = 0;
998
999 status = vxge_hw_vpath_fw_api(vpath,
1000 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
1001 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
1002 0, &data0, &data1, &steer_ctrl);
1003 if (status != VXGE_HW_OK)
1004 return status;
1005
1006 ((u64 *)part_number)[0] = be64_to_cpu(data0);
1007 ((u64 *)part_number)[1] = be64_to_cpu(data1);
1008
1009 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
1010 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
1011 data0 = i;
1012 data1 = steer_ctrl = 0;
1013
1014 status = vxge_hw_vpath_fw_api(vpath,
1015 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
1016 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
1017 0, &data0, &data1, &steer_ctrl);
1018 if (status != VXGE_HW_OK)
1019 return status;
1020
1021 ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
1022 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
1023 }
1024
1025 return status;
1026}
1027
1028/*
1029 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
1030 * Returns pci function mode
1031 */
1032static enum vxge_hw_status
1033__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
1034 struct vxge_hw_device_hw_info *hw_info)
1035{
1036 u64 data0, data1 = 0, steer_ctrl = 0;
1037 enum vxge_hw_status status;
1038
1039 data0 = 0;
1040
1041 status = vxge_hw_vpath_fw_api(vpath,
1042 VXGE_HW_FW_API_GET_FUNC_MODE,
1043 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
1044 0, &data0, &data1, &steer_ctrl);
1045 if (status != VXGE_HW_OK)
1046 return status;
1047
1048 hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
1049 return status;
1050}
1051
1052/*
1053 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
1054 * from MAC address table.
1055 */
1056static enum vxge_hw_status
1057__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
1058 u8 *macaddr, u8 *macaddr_mask)
1059{
1060 u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1061 data0 = 0, data1 = 0, steer_ctrl = 0;
1062 enum vxge_hw_status status;
1063 int i;
1064
1065 do {
1066 status = vxge_hw_vpath_fw_api(vpath, action,
1067 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1068 0, &data0, &data1, &steer_ctrl);
1069 if (status != VXGE_HW_OK)
1070 goto exit;
1071
1072 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
1073 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
1074 data1);
1075
1076 for (i = ETH_ALEN; i > 0; i--) {
1077 macaddr[i - 1] = (u8) (data0 & 0xFF);
1078 data0 >>= 8;
1079
1080 macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
1081 data1 >>= 8;
1082 }
1083
1084 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
1085 data0 = 0, data1 = 0, steer_ctrl = 0;
1086
1087 } while (!is_valid_ether_addr(macaddr));
1088exit:
1089 return status;
1090}
1091
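
__vxge_hw_vpath_addr_get above unpacks the MAC address and mask from 64-bit steering register words, filling the byte array from the end so that the least significant byte of the register lands in the last octet. A standalone sketch of that unpacking:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Unpack the low 48 bits of 'data' into mac[0..5], last octet from the LSB. */
static void unpack_mac(uint64_t data, uint8_t mac[ETH_ALEN])
{
        int i;

        for (i = ETH_ALEN; i > 0; i--) {
                mac[i - 1] = (uint8_t)(data & 0xFF);
                data >>= 8;
        }
}

int main(void)
{
        uint8_t mac[ETH_ALEN];

        unpack_mac(0x001122334455ULL, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}
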
653/** 1092/**
654 * vxge_hw_device_hw_info_get - Get the hw information 1093 * vxge_hw_device_hw_info_get - Get the hw information
655 * Returns the vpath mask that has the bits set for each vpath allocated 1094 * Returns the vpath mask that has the bits set for each vpath allocated
@@ -665,9 +1104,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
665 struct vxge_hw_toc_reg __iomem *toc; 1104 struct vxge_hw_toc_reg __iomem *toc;
666 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; 1105 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
667 struct vxge_hw_common_reg __iomem *common_reg; 1106 struct vxge_hw_common_reg __iomem *common_reg;
668 struct vxge_hw_vpath_reg __iomem *vpath_reg;
669 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; 1107 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
670 enum vxge_hw_status status; 1108 enum vxge_hw_status status;
1109 struct __vxge_hw_virtualpath vpath;
671 1110
672 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); 1111 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
673 1112
@@ -702,7 +1141,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
702 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *) 1141 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
703 (bar0 + val64); 1142 (bar0 + val64);
704 1143
705 hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg); 1144 hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
706 if (__vxge_hw_device_access_rights_get(hw_info->host_type, 1145 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
707 hw_info->func_id) & 1146 hw_info->func_id) &
708 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { 1147 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
@@ -718,16 +1157,19 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
718 1157
719 val64 = readq(&toc->toc_vpath_pointer[i]); 1158 val64 = readq(&toc->toc_vpath_pointer[i]);
720 1159
721 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); 1160 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1161 (bar0 + val64);
1162 vpath.vp_open = 0;
722 1163
723 hw_info->function_mode = 1164 status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
724 __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg); 1165 if (status != VXGE_HW_OK)
1166 goto exit;
725 1167
726 status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info); 1168 status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
727 if (status != VXGE_HW_OK) 1169 if (status != VXGE_HW_OK)
728 goto exit; 1170 goto exit;
729 1171
730 status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info); 1172 status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
731 if (status != VXGE_HW_OK) 1173 if (status != VXGE_HW_OK)
732 goto exit; 1174 goto exit;
733 1175
@@ -735,14 +1177,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
735 } 1177 }
736 1178
737 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1179 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
738
739 if (!((hw_info->vpath_mask) & vxge_mBIT(i))) 1180 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
740 continue; 1181 continue;
741 1182
742 val64 = readq(&toc->toc_vpath_pointer[i]); 1183 val64 = readq(&toc->toc_vpath_pointer[i]);
743 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); 1184 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1185 (bar0 + val64);
1186 vpath.vp_open = 0;
744 1187
745 status = __vxge_hw_vpath_addr_get(i, vpath_reg, 1188 status = __vxge_hw_vpath_addr_get(&vpath,
746 hw_info->mac_addrs[i], 1189 hw_info->mac_addrs[i],
747 hw_info->mac_addr_masks[i]); 1190 hw_info->mac_addr_masks[i]);
748 if (status != VXGE_HW_OK) 1191 if (status != VXGE_HW_OK)
@@ -776,14 +1219,12 @@ vxge_hw_device_initialize(
776 if (status != VXGE_HW_OK) 1219 if (status != VXGE_HW_OK)
777 goto exit; 1220 goto exit;
778 1221
779 hldev = (struct __vxge_hw_device *) 1222 hldev = vzalloc(sizeof(struct __vxge_hw_device));
780 vmalloc(sizeof(struct __vxge_hw_device));
781 if (hldev == NULL) { 1223 if (hldev == NULL) {
782 status = VXGE_HW_ERR_OUT_OF_MEMORY; 1224 status = VXGE_HW_ERR_OUT_OF_MEMORY;
783 goto exit; 1225 goto exit;
784 } 1226 }
785 1227
786 memset(hldev, 0, sizeof(struct __vxge_hw_device));
787 hldev->magic = VXGE_HW_DEVICE_MAGIC; 1228 hldev->magic = VXGE_HW_DEVICE_MAGIC;
788 1229
789 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); 1230 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
@@ -806,7 +1247,6 @@ vxge_hw_device_initialize(
806 vfree(hldev); 1247 vfree(hldev);
807 goto exit; 1248 goto exit;
808 } 1249 }
809 __vxge_hw_device_id_get(hldev);
810 1250
811 __vxge_hw_device_host_info_get(hldev); 1251 __vxge_hw_device_host_info_get(hldev);
812 1252
@@ -814,7 +1254,6 @@ vxge_hw_device_initialize(
814 nblocks++; 1254 nblocks++;
815 1255
816 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1256 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
817
818 if (!(hldev->vpath_assignments & vxge_mBIT(i))) 1257 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
819 continue; 1258 continue;
820 1259
@@ -839,7 +1278,6 @@ vxge_hw_device_initialize(
839 } 1278 }
840 1279
841 status = __vxge_hw_device_initialize(hldev); 1280 status = __vxge_hw_device_initialize(hldev);
842
843 if (status != VXGE_HW_OK) { 1281 if (status != VXGE_HW_OK) {
844 vxge_hw_device_terminate(hldev); 1282 vxge_hw_device_terminate(hldev);
845 goto exit; 1283 goto exit;
@@ -876,7 +1314,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
876 enum vxge_hw_status status = VXGE_HW_OK; 1314 enum vxge_hw_status status = VXGE_HW_OK;
877 1315
878 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1316 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
879
880 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || 1317 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
881 (hldev->virtual_paths[i].vp_open == 1318 (hldev->virtual_paths[i].vp_open ==
882 VXGE_HW_VP_NOT_OPEN)) 1319 VXGE_HW_VP_NOT_OPEN))
@@ -1165,7 +1602,6 @@ exit:
1165 * It can be used to set or reset Pause frame generation or reception 1602 * It can be used to set or reset Pause frame generation or reception
1166 * support of the NIC. 1603 * support of the NIC.
1167 */ 1604 */
1168
1169enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, 1605enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1170 u32 port, u32 tx, u32 rx) 1606 u32 port, u32 tx, u32 rx)
1171{ 1607{
@@ -1409,7 +1845,6 @@ exit:
1409/* 1845/*
1410 * __vxge_hw_ring_create - Create a Ring 1846 * __vxge_hw_ring_create - Create a Ring
1411 * This function creates Ring and initializes it. 1847 * This function creates Ring and initializes it.
1412 *
1413 */ 1848 */
1414static enum vxge_hw_status 1849static enum vxge_hw_status
1415__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, 1850__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
@@ -1627,15 +2062,12 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1627 * allocate new memblock and its private part at once. 2062 * allocate new memblock and its private part at once.
1628 * This helps to minimize memory usage a lot. */ 2063 * This helps to minimize memory usage a lot. */
1629 mempool->memblocks_priv_arr[i] = 2064 mempool->memblocks_priv_arr[i] =
1630 vmalloc(mempool->items_priv_size * n_items); 2065 vzalloc(mempool->items_priv_size * n_items);
1631 if (mempool->memblocks_priv_arr[i] == NULL) { 2066 if (mempool->memblocks_priv_arr[i] == NULL) {
1632 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2067 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1633 goto exit; 2068 goto exit;
1634 } 2069 }
1635 2070
1636 memset(mempool->memblocks_priv_arr[i], 0,
1637 mempool->items_priv_size * n_items);
1638
1639 /* allocate DMA-capable memblock */ 2071 /* allocate DMA-capable memblock */
1640 mempool->memblocks_arr[i] = 2072 mempool->memblocks_arr[i] =
1641 __vxge_hw_blockpool_malloc(mempool->devh, 2073 __vxge_hw_blockpool_malloc(mempool->devh,
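
Several hunks in this file replace a vmalloc() followed by memset() with a single vzalloc(), which returns memory that is already zeroed and is still freed with vfree(). A minimal kernel-flavoured sketch of the before/after pattern; the helper names are made up for illustration:

#include <linux/vmalloc.h>
#include <linux/string.h>

/* Before: allocate, then clear by hand. */
static void *alloc_table_old(unsigned long n)
{
        void *p = vmalloc(n);

        if (p)
                memset(p, 0, n);
        return p;
}

/* After: vzalloc() hands back zeroed memory in one call; free with vfree(). */
static void *alloc_table_new(unsigned long n)
{
        return vzalloc(n);
}
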
@@ -1707,13 +2139,11 @@ __vxge_hw_mempool_create(
1707 goto exit; 2139 goto exit;
1708 } 2140 }
1709 2141
1710 mempool = (struct vxge_hw_mempool *) 2142 mempool = vzalloc(sizeof(struct vxge_hw_mempool));
1711 vmalloc(sizeof(struct vxge_hw_mempool));
1712 if (mempool == NULL) { 2143 if (mempool == NULL) {
1713 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2144 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1714 goto exit; 2145 goto exit;
1715 } 2146 }
1716 memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1717 2147
1718 mempool->devh = devh; 2148 mempool->devh = devh;
1719 mempool->memblock_size = memblock_size; 2149 mempool->memblock_size = memblock_size;
@@ -1733,31 +2163,27 @@ __vxge_hw_mempool_create(
1733 2163
1734 /* allocate array of memblocks */ 2164 /* allocate array of memblocks */
1735 mempool->memblocks_arr = 2165 mempool->memblocks_arr =
1736 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); 2166 vzalloc(sizeof(void *) * mempool->memblocks_max);
1737 if (mempool->memblocks_arr == NULL) { 2167 if (mempool->memblocks_arr == NULL) {
1738 __vxge_hw_mempool_destroy(mempool); 2168 __vxge_hw_mempool_destroy(mempool);
1739 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2169 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1740 mempool = NULL; 2170 mempool = NULL;
1741 goto exit; 2171 goto exit;
1742 } 2172 }
1743 memset(mempool->memblocks_arr, 0,
1744 sizeof(void *) * mempool->memblocks_max);
1745 2173
1746 /* allocate array of private parts of items per memblocks */ 2174 /* allocate array of private parts of items per memblocks */
1747 mempool->memblocks_priv_arr = 2175 mempool->memblocks_priv_arr =
1748 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); 2176 vzalloc(sizeof(void *) * mempool->memblocks_max);
1749 if (mempool->memblocks_priv_arr == NULL) { 2177 if (mempool->memblocks_priv_arr == NULL) {
1750 __vxge_hw_mempool_destroy(mempool); 2178 __vxge_hw_mempool_destroy(mempool);
1751 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2179 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1752 mempool = NULL; 2180 mempool = NULL;
1753 goto exit; 2181 goto exit;
1754 } 2182 }
1755 memset(mempool->memblocks_priv_arr, 0,
1756 sizeof(void *) * mempool->memblocks_max);
1757 2183
1758 /* allocate array of memblocks DMA objects */ 2184 /* allocate array of memblocks DMA objects */
1759 mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *) 2185 mempool->memblocks_dma_arr =
1760 vmalloc(sizeof(struct vxge_hw_mempool_dma) * 2186 vzalloc(sizeof(struct vxge_hw_mempool_dma) *
1761 mempool->memblocks_max); 2187 mempool->memblocks_max);
1762 2188
1763 if (mempool->memblocks_dma_arr == NULL) { 2189 if (mempool->memblocks_dma_arr == NULL) {
@@ -1766,20 +2192,15 @@ __vxge_hw_mempool_create(
1766 mempool = NULL; 2192 mempool = NULL;
1767 goto exit; 2193 goto exit;
1768 } 2194 }
1769 memset(mempool->memblocks_dma_arr, 0,
1770 sizeof(struct vxge_hw_mempool_dma) *
1771 mempool->memblocks_max);
1772 2195
1773 /* allocate hash array of items */ 2196 /* allocate hash array of items */
1774 mempool->items_arr = 2197 mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
1775 (void **) vmalloc(sizeof(void *) * mempool->items_max);
1776 if (mempool->items_arr == NULL) { 2198 if (mempool->items_arr == NULL) {
1777 __vxge_hw_mempool_destroy(mempool); 2199 __vxge_hw_mempool_destroy(mempool);
1778 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2200 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1779 mempool = NULL; 2201 mempool = NULL;
1780 goto exit; 2202 goto exit;
1781 } 2203 }
1782 memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1783 2204
1784 /* calculate initial number of memblocks */ 2205 /* calculate initial number of memblocks */
1785 memblocks_to_allocate = (mempool->items_initial + 2206 memblocks_to_allocate = (mempool->items_initial +
@@ -1845,7 +2266,7 @@ static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1845 * __vxge_hw_device_fifo_config_check - Check fifo configuration. 2266 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1846 * Check the fifo configuration 2267 * Check the fifo configuration
1847 */ 2268 */
1848enum vxge_hw_status 2269static enum vxge_hw_status
1849__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) 2270__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1850{ 2271{
1851 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || 2272 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
@@ -1893,7 +2314,7 @@ __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1893 * __vxge_hw_device_config_check - Check device configuration. 2314 * __vxge_hw_device_config_check - Check device configuration.
1894 * Check the device configuration 2315 * Check the device configuration
1895 */ 2316 */
1896enum vxge_hw_status 2317static enum vxge_hw_status
1897__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) 2318__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1898{ 2319{
1899 u32 i; 2320 u32 i;
@@ -2453,7 +2874,7 @@ __vxge_hw_fifo_mempool_item_alloc(
2453 * __vxge_hw_fifo_create - Create a FIFO 2874 * __vxge_hw_fifo_create - Create a FIFO
2454 * This function creates FIFO and initializes it. 2875 * This function creates FIFO and initializes it.
2455 */ 2876 */
2456enum vxge_hw_status 2877static enum vxge_hw_status
2457__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, 2878__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2458 struct vxge_hw_fifo_attr *attr) 2879 struct vxge_hw_fifo_attr *attr)
2459{ 2880{
@@ -2617,7 +3038,8 @@ static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2617 * __vxge_hw_fifo_delete - Removes the FIFO 3038 * __vxge_hw_fifo_delete - Removes the FIFO
2618 * This function freeup the memory pool and removes the FIFO 3039 * This function freeup the memory pool and removes the FIFO
2619 */ 3040 */
2620enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) 3041static enum vxge_hw_status
3042__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2621{ 3043{
2622 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; 3044 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2623 3045
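
The linkage changes in the hunks above (and in the blockpool hunks near the end of this file) mark helpers that are only called from within vxge-config.c as static. A minimal sketch of the pattern with invented names; the real types come from vxge-config.h:

	/* forward declaration: the symbol stays private to this translation
	 * unit while the definition can live lower in the file */
	static enum vxge_hw_status
	__example_fifo_check(struct vxge_hw_fifo_config *cfg);

	enum vxge_hw_status example_exported_entry(struct vxge_hw_fifo_config *cfg)
	{
		return __example_fifo_check(cfg);  /* only in-file callers remain */
	}

	static enum vxge_hw_status
	__example_fifo_check(struct vxge_hw_fifo_config *cfg)
	{
		return VXGE_HW_OK;
	}
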
@@ -2675,297 +3097,6 @@ exit:
2675 return status; 3097 return status;
2676} 3098}
2677 3099
2678/*
2679 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2680 * Returns the function number of the vpath.
2681 */
2682static u32
2683__vxge_hw_vpath_func_id_get(u32 vp_id,
2684 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2685{
2686 u64 val64;
2687
2688 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2689
2690 return
2691 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2692}
2693
2694/*
2695 * __vxge_hw_read_rts_ds - Program RTS steering critieria
2696 */
2697static inline void
2698__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2699 u64 dta_struct_sel)
2700{
2701 writeq(0, &vpath_reg->rts_access_steer_ctrl);
2702 wmb();
2703 writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2704 writeq(0, &vpath_reg->rts_access_steer_data1);
2705 wmb();
2706}
2707
2708
2709/*
2710 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2711 * part number and product description.
2712 */
2713static enum vxge_hw_status
2714__vxge_hw_vpath_card_info_get(
2715 u32 vp_id,
2716 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2717 struct vxge_hw_device_hw_info *hw_info)
2718{
2719 u32 i, j;
2720 u64 val64;
2721 u64 data1 = 0ULL;
2722 u64 data2 = 0ULL;
2723 enum vxge_hw_status status = VXGE_HW_OK;
2724 u8 *serial_number = hw_info->serial_number;
2725 u8 *part_number = hw_info->part_number;
2726 u8 *product_desc = hw_info->product_desc;
2727
2728 __vxge_hw_read_rts_ds(vpath_reg,
2729 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2730
2731 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2732 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2733 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2734 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2735 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2736 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2737
2738 status = __vxge_hw_pio_mem_write64(val64,
2739 &vpath_reg->rts_access_steer_ctrl,
2740 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2741 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2742
2743 if (status != VXGE_HW_OK)
2744 return status;
2745
2746 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2747
2748 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2749 data1 = readq(&vpath_reg->rts_access_steer_data0);
2750 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2751
2752 data2 = readq(&vpath_reg->rts_access_steer_data1);
2753 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2754 status = VXGE_HW_OK;
2755 } else
2756 *serial_number = 0;
2757
2758 __vxge_hw_read_rts_ds(vpath_reg,
2759 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2760
2761 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2762 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2763 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2764 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2765 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2766 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2767
2768 status = __vxge_hw_pio_mem_write64(val64,
2769 &vpath_reg->rts_access_steer_ctrl,
2770 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2771 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2772
2773 if (status != VXGE_HW_OK)
2774 return status;
2775
2776 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2777
2778 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2779
2780 data1 = readq(&vpath_reg->rts_access_steer_data0);
2781 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2782
2783 data2 = readq(&vpath_reg->rts_access_steer_data1);
2784 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2785
2786 status = VXGE_HW_OK;
2787
2788 } else
2789 *part_number = 0;
2790
2791 j = 0;
2792
2793 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2794 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2795
2796 __vxge_hw_read_rts_ds(vpath_reg, i);
2797
2798 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2799 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2800 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2801 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2802 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2803 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2804
2805 status = __vxge_hw_pio_mem_write64(val64,
2806 &vpath_reg->rts_access_steer_ctrl,
2807 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2808 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2809
2810 if (status != VXGE_HW_OK)
2811 return status;
2812
2813 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2814
2815 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2816
2817 data1 = readq(&vpath_reg->rts_access_steer_data0);
2818 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2819
2820 data2 = readq(&vpath_reg->rts_access_steer_data1);
2821 ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2822
2823 status = VXGE_HW_OK;
2824 } else
2825 *product_desc = 0;
2826 }
2827
2828 return status;
2829}
2830
2831/*
2832 * __vxge_hw_vpath_fw_ver_get - Get the fw version
2833 * Returns FW Version
2834 */
2835static enum vxge_hw_status
2836__vxge_hw_vpath_fw_ver_get(
2837 u32 vp_id,
2838 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2839 struct vxge_hw_device_hw_info *hw_info)
2840{
2841 u64 val64;
2842 u64 data1 = 0ULL;
2843 u64 data2 = 0ULL;
2844 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2845 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2846 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2847 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2848 enum vxge_hw_status status = VXGE_HW_OK;
2849
2850 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2851 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2852 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2853 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2854 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2855 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2856
2857 status = __vxge_hw_pio_mem_write64(val64,
2858 &vpath_reg->rts_access_steer_ctrl,
2859 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2860 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2861
2862 if (status != VXGE_HW_OK)
2863 goto exit;
2864
2865 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2866
2867 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2868
2869 data1 = readq(&vpath_reg->rts_access_steer_data0);
2870 data2 = readq(&vpath_reg->rts_access_steer_data1);
2871
2872 fw_date->day =
2873 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2874 data1);
2875 fw_date->month =
2876 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2877 data1);
2878 fw_date->year =
2879 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2880 data1);
2881
2882 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2883 fw_date->month, fw_date->day, fw_date->year);
2884
2885 fw_version->major =
2886 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2887 fw_version->minor =
2888 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2889 fw_version->build =
2890 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2891
2892 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2893 fw_version->major, fw_version->minor, fw_version->build);
2894
2895 flash_date->day =
2896 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2897 flash_date->month =
2898 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2899 flash_date->year =
2900 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2901
2902 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2903 "%2.2d/%2.2d/%4.4d",
2904 flash_date->month, flash_date->day, flash_date->year);
2905
2906 flash_version->major =
2907 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2908 flash_version->minor =
2909 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2910 flash_version->build =
2911 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2912
2913 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2914 flash_version->major, flash_version->minor,
2915 flash_version->build);
2916
2917 status = VXGE_HW_OK;
2918
2919 } else
2920 status = VXGE_HW_FAIL;
2921exit:
2922 return status;
2923}
2924
2925/*
2926 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
2927 * Returns pci function mode
2928 */
2929static u64
2930__vxge_hw_vpath_pci_func_mode_get(
2931 u32 vp_id,
2932 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2933{
2934 u64 val64;
2935 u64 data1 = 0ULL;
2936 enum vxge_hw_status status = VXGE_HW_OK;
2937
2938 __vxge_hw_read_rts_ds(vpath_reg,
2939 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
2940
2941 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2942 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2943 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2944 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2945 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2946 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2947
2948 status = __vxge_hw_pio_mem_write64(val64,
2949 &vpath_reg->rts_access_steer_ctrl,
2950 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2951 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2952
2953 if (status != VXGE_HW_OK)
2954 goto exit;
2955
2956 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2957
2958 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2959 data1 = readq(&vpath_reg->rts_access_steer_data0);
2960 status = VXGE_HW_OK;
2961 } else {
2962 data1 = 0;
2963 status = VXGE_HW_FAIL;
2964 }
2965exit:
2966 return data1;
2967}
2968
2969/** 3100/**
2970 * vxge_hw_device_flick_link_led - Flick (blink) link LED. 3101 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
2971 * @hldev: HW device. 3102 * @hldev: HW device.
@@ -2974,37 +3105,24 @@ exit:
2974 * Flicker the link LED. 3105 * Flicker the link LED.
2975 */ 3106 */
2976enum vxge_hw_status 3107enum vxge_hw_status
2977vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, 3108vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
2978 u64 on_off)
2979{ 3109{
2980 u64 val64; 3110 struct __vxge_hw_virtualpath *vpath;
2981 enum vxge_hw_status status = VXGE_HW_OK; 3111 u64 data0, data1 = 0, steer_ctrl = 0;
2982 struct vxge_hw_vpath_reg __iomem *vp_reg; 3112 enum vxge_hw_status status;
2983 3113
2984 if (hldev == NULL) { 3114 if (hldev == NULL) {
2985 status = VXGE_HW_ERR_INVALID_DEVICE; 3115 status = VXGE_HW_ERR_INVALID_DEVICE;
2986 goto exit; 3116 goto exit;
2987 } 3117 }
2988 3118
2989 vp_reg = hldev->vpath_reg[hldev->first_vp_id]; 3119 vpath = &hldev->virtual_paths[hldev->first_vp_id];
2990
2991 writeq(0, &vp_reg->rts_access_steer_ctrl);
2992 wmb();
2993 writeq(on_off, &vp_reg->rts_access_steer_data0);
2994 writeq(0, &vp_reg->rts_access_steer_data1);
2995 wmb();
2996
2997 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2998 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
2999 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3000 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3001 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3002 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3003 3120
3004 status = __vxge_hw_pio_mem_write64(val64, 3121 data0 = on_off;
3005 &vp_reg->rts_access_steer_ctrl, 3122 status = vxge_hw_vpath_fw_api(vpath,
3006 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, 3123 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
3007 VXGE_HW_DEF_DEVICE_POLL_MILLIS); 3124 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
3125 0, &data0, &data1, &steer_ctrl);
3008exit: 3126exit:
3009 return status; 3127 return status;
3010} 3128}
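
The three helpers deleted above (card info, firmware/flash version, PCI function mode) and the LED-flicker path each open-coded the same rts_access_steer sequence: prime data0/data1, strobe a command word, poll for completion, then read the result back. The hunk above replaces the LED variant with a call to a common vxge_hw_vpath_fw_api() helper, which the RTS-table accessors in the next hunks reuse. Its body is not part of this excerpt; the sketch below reconstructs a plausible shape from the call sites and the removed code, so details may differ from the in-tree version:

	enum vxge_hw_status
	vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
			     u32 sel, u32 offset, u64 *data0, u64 *data1,
			     u64 *steer_ctrl)
	{
		struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
		enum vxge_hw_status status;
		u64 val64;

		/* stage the operands exactly as the removed helpers did */
		writeq(*data0, &vp_reg->rts_access_steer_data0);
		writeq(*data1, &vp_reg->rts_access_steer_data1);
		wmb();

		val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(sel) |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | *steer_ctrl;

		/* strobe the command and poll until the strobe clears */
		status = __vxge_hw_pio_mem_write64(val64,
				&vp_reg->rts_access_steer_ctrl,
				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
		if (status != VXGE_HW_OK)
			return status;

		val64 = readq(&vp_reg->rts_access_steer_ctrl);
		if (!(val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS))
			return VXGE_HW_FAIL;

		/* hand the results back to the caller */
		*data0 = readq(&vp_reg->rts_access_steer_data0);
		*data1 = readq(&vp_reg->rts_access_steer_data1);
		*steer_ctrl = val64;

		return VXGE_HW_OK;
	}
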
@@ -3013,63 +3131,38 @@ exit:
3013 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables 3131 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3014 */ 3132 */
3015enum vxge_hw_status 3133enum vxge_hw_status
3016__vxge_hw_vpath_rts_table_get( 3134__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3017 struct __vxge_hw_vpath_handle *vp, 3135 u32 action, u32 rts_table, u32 offset,
3018 u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2) 3136 u64 *data0, u64 *data1)
3019{ 3137{
3020 u64 val64; 3138 enum vxge_hw_status status;
3021 struct __vxge_hw_virtualpath *vpath; 3139 u64 steer_ctrl = 0;
3022 struct vxge_hw_vpath_reg __iomem *vp_reg;
3023
3024 enum vxge_hw_status status = VXGE_HW_OK;
3025 3140
3026 if (vp == NULL) { 3141 if (vp == NULL) {
3027 status = VXGE_HW_ERR_INVALID_HANDLE; 3142 status = VXGE_HW_ERR_INVALID_HANDLE;
3028 goto exit; 3143 goto exit;
3029 } 3144 }
3030 3145
3031 vpath = vp->vpath;
3032 vp_reg = vpath->vp_reg;
3033
3034 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3035 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3036 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3037 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3038
3039 if ((rts_table == 3146 if ((rts_table ==
3040 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || 3147 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3041 (rts_table == 3148 (rts_table ==
3042 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || 3149 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3043 (rts_table == 3150 (rts_table ==
3044 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || 3151 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3045 (rts_table == 3152 (rts_table ==
3046 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { 3153 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3047 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; 3154 steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3048 } 3155 }
3049 3156
3050 status = __vxge_hw_pio_mem_write64(val64, 3157 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3051 &vp_reg->rts_access_steer_ctrl, 3158 data0, data1, &steer_ctrl);
3052 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3053 vpath->hldev->config.device_poll_millis);
3054
3055 if (status != VXGE_HW_OK) 3159 if (status != VXGE_HW_OK)
3056 goto exit; 3160 goto exit;
3057 3161
3058 val64 = readq(&vp_reg->rts_access_steer_ctrl); 3162 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3059 3163 (rts_table !=
3060 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { 3164 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3061 3165 *data1 = 0;
3062 *data1 = readq(&vp_reg->rts_access_steer_data0);
3063
3064 if ((rts_table ==
3065 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3066 (rts_table ==
3067 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3068 *data2 = readq(&vp_reg->rts_access_steer_data1);
3069 }
3070 status = VXGE_HW_OK;
3071 } else
3072 status = VXGE_HW_FAIL;
3073exit: 3166exit:
3074 return status; 3167 return status;
3075} 3168}
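
With the getter reduced to a thin wrapper, callers that previously had dedicated helpers (for example the removed __vxge_hw_vpath_addr_get()) read the steering tables through it and unpack data0/data1 themselves. A hedged sketch of fetching the first DA (MAC address) entry, reusing the unpacking loop from the removed helper; vp is a struct __vxge_hw_vpath_handle * in scope:

	u64 data0 = 0, data1 = 0;
	u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];
	enum vxge_hw_status status;
	int i;

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data0, &data1);
	if (status == VXGE_HW_OK) {
		data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data1);
		for (i = ETH_ALEN; i > 0; i--) {	/* bytes arrive MSB first */
			macaddr[i - 1] = (u8)(data0 & 0xFF);
			data0 >>= 8;
			macaddr_mask[i - 1] = (u8)(data1 & 0xFF);
			data1 >>= 8;
		}
	}
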
@@ -3078,107 +3171,27 @@ exit:
3078 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables 3171 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3079 */ 3172 */
3080enum vxge_hw_status 3173enum vxge_hw_status
3081__vxge_hw_vpath_rts_table_set( 3174__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
3082 struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table, 3175 u32 rts_table, u32 offset, u64 steer_data0,
3083 u32 offset, u64 data1, u64 data2) 3176 u64 steer_data1)
3084{ 3177{
3085 u64 val64; 3178 u64 data0, data1 = 0, steer_ctrl = 0;
3086 struct __vxge_hw_virtualpath *vpath; 3179 enum vxge_hw_status status;
3087 enum vxge_hw_status status = VXGE_HW_OK;
3088 struct vxge_hw_vpath_reg __iomem *vp_reg;
3089 3180
3090 if (vp == NULL) { 3181 if (vp == NULL) {
3091 status = VXGE_HW_ERR_INVALID_HANDLE; 3182 status = VXGE_HW_ERR_INVALID_HANDLE;
3092 goto exit; 3183 goto exit;
3093 } 3184 }
3094 3185
3095 vpath = vp->vpath; 3186 data0 = steer_data0;
3096 vp_reg = vpath->vp_reg;
3097
3098 writeq(data1, &vp_reg->rts_access_steer_data0);
3099 wmb();
3100 3187
3101 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || 3188 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3102 (rts_table == 3189 (rts_table ==
3103 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { 3190 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3104 writeq(data2, &vp_reg->rts_access_steer_data1); 3191 data1 = steer_data1;
3105 wmb();
3106 }
3107 3192
3108 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | 3193 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3109 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | 3194 &data0, &data1, &steer_ctrl);
3110 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3111 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3112
3113 status = __vxge_hw_pio_mem_write64(val64,
3114 &vp_reg->rts_access_steer_ctrl,
3115 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3116 vpath->hldev->config.device_poll_millis);
3117
3118 if (status != VXGE_HW_OK)
3119 goto exit;
3120
3121 val64 = readq(&vp_reg->rts_access_steer_ctrl);
3122
3123 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
3124 status = VXGE_HW_OK;
3125 else
3126 status = VXGE_HW_FAIL;
3127exit:
3128 return status;
3129}
3130
3131/*
3132 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3133 * from MAC address table.
3134 */
3135static enum vxge_hw_status
3136__vxge_hw_vpath_addr_get(
3137 u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3138 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3139{
3140 u32 i;
3141 u64 val64;
3142 u64 data1 = 0ULL;
3143 u64 data2 = 0ULL;
3144 enum vxge_hw_status status = VXGE_HW_OK;
3145
3146 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3147 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3148 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3149 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3150 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3151 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3152
3153 status = __vxge_hw_pio_mem_write64(val64,
3154 &vpath_reg->rts_access_steer_ctrl,
3155 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3156 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3157
3158 if (status != VXGE_HW_OK)
3159 goto exit;
3160
3161 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3162
3163 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3164
3165 data1 = readq(&vpath_reg->rts_access_steer_data0);
3166 data2 = readq(&vpath_reg->rts_access_steer_data1);
3167
3168 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3169 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3170 data2);
3171
3172 for (i = ETH_ALEN; i > 0; i--) {
3173 macaddr[i-1] = (u8)(data1 & 0xFF);
3174 data1 >>= 8;
3175
3176 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3177 data2 >>= 8;
3178 }
3179 status = VXGE_HW_OK;
3180 } else
3181 status = VXGE_HW_FAIL;
3182exit: 3195exit:
3183 return status; 3196 return status;
3184} 3197}
@@ -3204,6 +3217,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3204 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, 3217 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3205 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, 3218 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3206 0, &data0, &data1); 3219 0, &data0, &data1);
3220 if (status != VXGE_HW_OK)
3221 goto exit;
3207 3222
3208 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | 3223 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3209 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); 3224 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
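
The two added lines make vxge_hw_vpath_rts_rth_set() bail out when the initial read of RTH_GEN_CFG fails, so the driver never writes back a value it did not actually fetch. The surrounding read-modify-write, condensed for readability (the write-back action name is taken from the driver's register header, not from this excerpt):

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
			0, &data0, &data1);
	if (status != VXGE_HW_OK)	/* new: skip the RMW on a failed read */
		goto exit;

	data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
		   VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
	/* ... apply the requested bucket size, algorithm and hash types ... */

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
			0, data0, 0);
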
@@ -4117,6 +4132,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4117 4132
4118 vpath = &hldev->virtual_paths[vp_id]; 4133 vpath = &hldev->virtual_paths[vp_id];
4119 4134
4135 spin_lock_init(&hldev->virtual_paths[vp_id].lock);
4120 vpath->vp_id = vp_id; 4136 vpath->vp_id = vp_id;
4121 vpath->vp_open = VXGE_HW_VP_OPEN; 4137 vpath->vp_open = VXGE_HW_VP_OPEN;
4122 vpath->hldev = hldev; 4138 vpath->hldev = hldev;
@@ -4127,14 +4143,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4127 __vxge_hw_vpath_reset(hldev, vp_id); 4143 __vxge_hw_vpath_reset(hldev, vp_id);
4128 4144
4129 status = __vxge_hw_vpath_reset_check(vpath); 4145 status = __vxge_hw_vpath_reset_check(vpath);
4130
4131 if (status != VXGE_HW_OK) { 4146 if (status != VXGE_HW_OK) {
4132 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); 4147 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4133 goto exit; 4148 goto exit;
4134 } 4149 }
4135 4150
4136 status = __vxge_hw_vpath_mgmt_read(hldev, vpath); 4151 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4137
4138 if (status != VXGE_HW_OK) { 4152 if (status != VXGE_HW_OK) {
4139 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); 4153 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4140 goto exit; 4154 goto exit;
@@ -4148,7 +4162,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4148 hldev->tim_int_mask1, vp_id); 4162 hldev->tim_int_mask1, vp_id);
4149 4163
4150 status = __vxge_hw_vpath_initialize(hldev, vp_id); 4164 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4151
4152 if (status != VXGE_HW_OK) 4165 if (status != VXGE_HW_OK)
4153 __vxge_hw_vp_terminate(hldev, vp_id); 4166 __vxge_hw_vp_terminate(hldev, vp_id);
4154exit: 4167exit:
@@ -4242,15 +4255,12 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4242 if (status != VXGE_HW_OK) 4255 if (status != VXGE_HW_OK)
4243 goto vpath_open_exit1; 4256 goto vpath_open_exit1;
4244 4257
4245 vp = (struct __vxge_hw_vpath_handle *) 4258 vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
4246 vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4247 if (vp == NULL) { 4259 if (vp == NULL) {
4248 status = VXGE_HW_ERR_OUT_OF_MEMORY; 4260 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4249 goto vpath_open_exit2; 4261 goto vpath_open_exit2;
4250 } 4262 }
4251 4263
4252 memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4253
4254 vp->vpath = vpath; 4264 vp->vpath = vpath;
4255 4265
4256 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { 4266 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
@@ -4335,16 +4345,18 @@ vpath_open_exit1:
4335void 4345void
4336vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) 4346vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4337{ 4347{
4338 struct __vxge_hw_virtualpath *vpath = NULL; 4348 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4349 struct __vxge_hw_ring *ring = vpath->ringh;
4350 struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
4339 u64 new_count, val64, val164; 4351 u64 new_count, val64, val164;
4340 struct __vxge_hw_ring *ring;
4341 4352
4342 vpath = vp->vpath; 4353 if (vdev->titan1) {
4343 ring = vpath->ringh; 4354 new_count = readq(&vpath->vp_reg->rxdmem_size);
4355 new_count &= 0x1fff;
4356 } else
4357 new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
4344 4358
4345 new_count = readq(&vpath->vp_reg->rxdmem_size); 4359 val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
4346 new_count &= 0x1fff;
4347 val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4348 4360
4349 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), 4361 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4350 &vpath->vp_reg->prc_rxd_doorbell); 4362 &vpath->vp_reg->prc_rxd_doorbell);
@@ -4414,7 +4426,9 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4414 4426
4415 __vxge_hw_vp_terminate(devh, vp_id); 4427 __vxge_hw_vp_terminate(devh, vp_id);
4416 4428
4429 spin_lock(&vpath->lock);
4417 vpath->vp_open = VXGE_HW_VP_NOT_OPEN; 4430 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4431 spin_unlock(&vpath->lock);
4418 4432
4419vpath_close_exit: 4433vpath_close_exit:
4420 return status; 4434 return status;
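
vxge_hw_vpath_close() now clears vp_open under the per-vpath spinlock that this patch adds to struct __vxge_hw_virtualpath (see the vxge-config.h hunk below) and initialises in __vxge_hw_vp_initialize() above. Code that tests vp_open concurrently is expected to take the same lock; a hypothetical reader, for illustration only:

	static void example_poke_vpath(struct __vxge_hw_virtualpath *vpath)
	{
		spin_lock(&vpath->lock);
		if (vpath->vp_open == VXGE_HW_VP_OPEN) {
			/* the vpath cannot be torn down while the lock is held */
			;
		}
		spin_unlock(&vpath->lock);
	}
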
@@ -4810,7 +4824,7 @@ static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
4810 * __vxge_hw_blockpool_create - Create block pool 4824 * __vxge_hw_blockpool_create - Create block pool
4811 */ 4825 */
4812 4826
4813enum vxge_hw_status 4827static enum vxge_hw_status
4814__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, 4828__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4815 struct __vxge_hw_blockpool *blockpool, 4829 struct __vxge_hw_blockpool *blockpool,
4816 u32 pool_size, 4830 u32 pool_size,
@@ -4910,7 +4924,7 @@ blockpool_create_exit:
4910 * __vxge_hw_blockpool_destroy - Deallocates the block pool 4924 * __vxge_hw_blockpool_destroy - Deallocates the block pool
4911 */ 4925 */
4912 4926
4913void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) 4927static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4914{ 4928{
4915 4929
4916 struct __vxge_hw_device *hldev; 4930 struct __vxge_hw_device *hldev;
@@ -5047,8 +5061,7 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
5047 item); 5061 item);
5048 5062
5049 if (entry == NULL) 5063 if (entry == NULL)
5050 entry = (struct __vxge_hw_blockpool_entry *) 5064 entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5051 vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5052 else 5065 else
5053 list_del(&entry->item); 5066 list_del(&entry->item);
5054 5067
@@ -5076,7 +5089,7 @@ exit:
5076 * Allocates a block of memory of given size, either from block pool 5089 * Allocates a block of memory of given size, either from block pool
5077 * or by calling vxge_os_dma_malloc() 5090 * or by calling vxge_os_dma_malloc()
5078 */ 5091 */
5079void * 5092static void *
5080__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, 5093__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5081 struct vxge_hw_mempool_dma *dma_object) 5094 struct vxge_hw_mempool_dma *dma_object)
5082{ 5095{
@@ -5140,7 +5153,7 @@ exit:
5140 * __vxge_hw_blockpool_free - Frees the memory allcoated with 5153 * __vxge_hw_blockpool_free - Frees the memory allcoated with
5141 __vxge_hw_blockpool_malloc 5154 __vxge_hw_blockpool_malloc
5142 */ 5155 */
5143void 5156static void
5144__vxge_hw_blockpool_free(struct __vxge_hw_device *devh, 5157__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5145 void *memblock, u32 size, 5158 void *memblock, u32 size,
5146 struct vxge_hw_mempool_dma *dma_object) 5159 struct vxge_hw_mempool_dma *dma_object)
@@ -5164,8 +5177,7 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5164 item); 5177 item);
5165 5178
5166 if (entry == NULL) 5179 if (entry == NULL)
5167 entry = (struct __vxge_hw_blockpool_entry *) 5180 entry = vmalloc(sizeof(
5168 vmalloc(sizeof(
5169 struct __vxge_hw_blockpool_entry)); 5181 struct __vxge_hw_blockpool_entry));
5170 else 5182 else
5171 list_del(&entry->item); 5183 list_del(&entry->item);
@@ -5192,7 +5204,7 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5192 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool 5204 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5193 * This function allocates a block from block pool or from the system 5205 * This function allocates a block from block pool or from the system
5194 */ 5206 */
5195struct __vxge_hw_blockpool_entry * 5207static struct __vxge_hw_blockpool_entry *
5196__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) 5208__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5197{ 5209{
5198 struct __vxge_hw_blockpool_entry *entry = NULL; 5210 struct __vxge_hw_blockpool_entry *entry = NULL;
@@ -5227,7 +5239,7 @@ __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5227 * 5239 *
5228 * This function frees a block from block pool 5240 * This function frees a block from block pool
5229 */ 5241 */
5230void 5242static void
5231__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, 5243__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5232 struct __vxge_hw_blockpool_entry *entry) 5244 struct __vxge_hw_blockpool_entry *entry)
5233{ 5245{
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 5c00861b6c2..5b2c8313426 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -20,13 +20,6 @@
20#define VXGE_CACHE_LINE_SIZE 128 20#define VXGE_CACHE_LINE_SIZE 128
21#endif 21#endif
22 22
23#define vxge_os_vaprintf(level, mask, fmt, ...) { \
24 char buff[255]; \
25 snprintf(buff, 255, fmt, __VA_ARGS__); \
26 printk(buff); \
27 printk("\n"); \
28}
29
30#ifndef VXGE_ALIGN 23#ifndef VXGE_ALIGN
31#define VXGE_ALIGN(adrs, size) \ 24#define VXGE_ALIGN(adrs, size) \
32 (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1)) 25 (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
@@ -36,8 +29,16 @@
36#define VXGE_HW_MAX_MTU 9600 29#define VXGE_HW_MAX_MTU 9600
37#define VXGE_HW_DEFAULT_MTU 1500 30#define VXGE_HW_DEFAULT_MTU 1500
38 31
39#ifdef VXGE_DEBUG_ASSERT 32#define VXGE_HW_MAX_ROM_IMAGES 8
33
34struct eprom_image {
35 u8 is_valid:1;
36 u8 index;
37 u8 type;
38 u16 version;
39};
40 40
41#ifdef VXGE_DEBUG_ASSERT
41/** 42/**
42 * vxge_assert 43 * vxge_assert
43 * @test: C-condition to check 44 * @test: C-condition to check
@@ -48,16 +49,13 @@
48 * compilation 49 * compilation
49 * time. 50 * time.
50 */ 51 */
51#define vxge_assert(test) { \ 52#define vxge_assert(test) BUG_ON(!(test))
52 if (!(test)) \
53 vxge_os_bug("bad cond: "#test" at %s:%d\n", \
54 __FILE__, __LINE__); }
55#else 53#else
56#define vxge_assert(test) 54#define vxge_assert(test)
57#endif /* end of VXGE_DEBUG_ASSERT */ 55#endif /* end of VXGE_DEBUG_ASSERT */
58 56
59/** 57/**
60 * enum enum vxge_debug_level 58 * enum vxge_debug_level
61 * @VXGE_NONE: debug disabled 59 * @VXGE_NONE: debug disabled
62 * @VXGE_ERR: all errors going to be logged out 60 * @VXGE_ERR: all errors going to be logged out
63 * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs 61 * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
@@ -159,6 +157,47 @@ enum vxge_hw_device_link_state {
159}; 157};
160 158
161/** 159/**
 160 * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
161 * @VXGE_HW_FW_UPGRADE_OK: All OK send next 16 bytes
162 * @VXGE_HW_FW_UPGRADE_DONE: upload completed
163 * @VXGE_HW_FW_UPGRADE_ERR: upload error
164 * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
165 *
166 */
167enum vxge_hw_fw_upgrade_code {
168 VXGE_HW_FW_UPGRADE_OK = 0,
169 VXGE_HW_FW_UPGRADE_DONE = 1,
170 VXGE_HW_FW_UPGRADE_ERR = 2,
171 VXGE_FW_UPGRADE_BYTES2SKIP = 3
172};
173
174/**
 175 * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
176 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
177 * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
178 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
179 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
180 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
181 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
182 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
183 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
184 * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type
185 * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash image check failed
186 */
187enum vxge_hw_fw_upgrade_err_code {
188 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
189 VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
190 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
191 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
192 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
193 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
194 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
195 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
196 VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
197 VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
198};
199
200/**
162 * struct vxge_hw_device_date - Date Format 201 * struct vxge_hw_device_date - Date Format
163 * @day: Day 202 * @day: Day
164 * @month: Month 203 * @month: Month
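
Together the two new enums describe a chunked upload handshake: the host feeds the image to the adapter a few bytes at a time and the firmware answers with OK (send the next 16 bytes), BYTES2SKIP, DONE or ERR, the second enum refining the error case. The real loop lives in vxge_update_fw_image(), which is not part of this hunk; the sketch below is a hypothetical illustration with invented helper names:

#define EXAMPLE_FW_CHUNK 16	/* "All OK send next 16 bytes" */

static enum vxge_hw_status
example_upload_fw(struct __vxge_hw_device *hldev, const u8 *filebuf, int size)
{
	while (size > 0) {
		/* example_send_fw_chunk()/example_skip_count() are invented */
		u32 ret = example_send_fw_chunk(hldev, filebuf, EXAMPLE_FW_CHUNK);

		switch (ret) {
		case VXGE_HW_FW_UPGRADE_OK:	/* firmware wants more data */
			filebuf += EXAMPLE_FW_CHUNK;
			size -= EXAMPLE_FW_CHUNK;
			break;
		case VXGE_FW_UPGRADE_BYTES2SKIP: {
			int skip = example_skip_count(hldev);

			filebuf += skip;	/* skip bytes in the stream */
			size -= skip;
			break;
		}
		case VXGE_HW_FW_UPGRADE_DONE:
			return VXGE_HW_OK;
		case VXGE_HW_FW_UPGRADE_ERR:
		default:
			return VXGE_HW_FAIL;	/* detail in the error enum */
		}
	}
	return VXGE_HW_OK;
}
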
@@ -465,7 +504,6 @@ struct vxge_hw_device_config {
465 * See also: vxge_hw_driver_initialize(). 504 * See also: vxge_hw_driver_initialize().
466 */ 505 */
467struct vxge_hw_uld_cbs { 506struct vxge_hw_uld_cbs {
468
469 void (*link_up)(struct __vxge_hw_device *devh); 507 void (*link_up)(struct __vxge_hw_device *devh);
470 void (*link_down)(struct __vxge_hw_device *devh); 508 void (*link_down)(struct __vxge_hw_device *devh);
471 void (*crit_err)(struct __vxge_hw_device *devh, 509 void (*crit_err)(struct __vxge_hw_device *devh,
@@ -652,6 +690,7 @@ struct __vxge_hw_virtualpath {
652 struct vxge_hw_vpath_stats_hw_info *hw_stats; 690 struct vxge_hw_vpath_stats_hw_info *hw_stats;
653 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav; 691 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
654 struct vxge_hw_vpath_stats_sw_info *sw_stats; 692 struct vxge_hw_vpath_stats_sw_info *sw_stats;
693 spinlock_t lock;
655}; 694};
656 695
657/* 696/*
@@ -674,9 +713,6 @@ struct __vxge_hw_vpath_handle{
674/** 713/**
675 * struct __vxge_hw_device - Hal device object 714 * struct __vxge_hw_device - Hal device object
676 * @magic: Magic Number 715 * @magic: Magic Number
677 * @device_id: PCI Device Id of the adapter
678 * @major_revision: PCI Device major revision
679 * @minor_revision: PCI Device minor revision
680 * @bar0: BAR0 virtual address. 716 * @bar0: BAR0 virtual address.
681 * @pdev: Physical device handle 717 * @pdev: Physical device handle
682 * @config: Confguration passed by the LL driver at initialization 718 * @config: Confguration passed by the LL driver at initialization
@@ -688,9 +724,6 @@ struct __vxge_hw_device {
688 u32 magic; 724 u32 magic;
689#define VXGE_HW_DEVICE_MAGIC 0x12345678 725#define VXGE_HW_DEVICE_MAGIC 0x12345678
690#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD 726#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
691 u16 device_id;
692 u8 major_revision;
693 u8 minor_revision;
694 void __iomem *bar0; 727 void __iomem *bar0;
695 struct pci_dev *pdev; 728 struct pci_dev *pdev;
696 struct net_device *ndev; 729 struct net_device *ndev;
@@ -731,6 +764,7 @@ struct __vxge_hw_device {
731 u32 debug_level; 764 u32 debug_level;
732 u32 level_err; 765 u32 level_err;
733 u32 level_trace; 766 u32 level_trace;
767 u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
734}; 768};
735 769
736#define VXGE_HW_INFO_LEN 64 770#define VXGE_HW_INFO_LEN 64
@@ -1413,12 +1447,12 @@ enum vxge_hw_rth_algoritms {
1413 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get(). 1447 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
1414 */ 1448 */
1415struct vxge_hw_rth_hash_types { 1449struct vxge_hw_rth_hash_types {
1416 u8 hash_type_tcpipv4_en; 1450 u8 hash_type_tcpipv4_en:1,
1417 u8 hash_type_ipv4_en; 1451 hash_type_ipv4_en:1,
1418 u8 hash_type_tcpipv6_en; 1452 hash_type_tcpipv6_en:1,
1419 u8 hash_type_ipv6_en; 1453 hash_type_ipv6_en:1,
1420 u8 hash_type_tcpipv6ex_en; 1454 hash_type_tcpipv6ex_en:1,
1421 u8 hash_type_ipv6ex_en; 1455 hash_type_ipv6ex_en:1;
1422}; 1456};
1423 1457
1424void vxge_hw_device_debug_set( 1458void vxge_hw_device_debug_set(
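
Converting the six enable flags to one-bit bitfields shrinks struct vxge_hw_rth_hash_types from six bytes to one, while callers keep assigning plain 0/1 values. A minimal initialisation example (flag values chosen arbitrarily):

	struct vxge_hw_rth_hash_types hash_types = {
		.hash_type_tcpipv4_en	= 1,
		.hash_type_ipv4_en	= 1,
		.hash_type_tcpipv6_en	= 0,
		.hash_type_ipv6_en	= 0,
		.hash_type_tcpipv6ex_en	= 0,
		.hash_type_ipv6ex_en	= 0,
	};

	/* handed to vxge_hw_vpath_rts_rth_set() exactly as before */
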
@@ -2000,7 +2034,7 @@ enum vxge_hw_status
2000vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); 2034vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2001 2035
2002/** 2036/**
2003 * vxge_debug 2037 * vxge_debug_ll
2004 * @level: level of debug verbosity. 2038 * @level: level of debug verbosity.
2005 * @mask: mask for the debug 2039 * @mask: mask for the debug
2006 * @buf: Circular buffer for tracing 2040 * @buf: Circular buffer for tracing
@@ -2012,26 +2046,13 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2012 * may be compiled out if DEBUG macro was never defined. 2046 * may be compiled out if DEBUG macro was never defined.
2013 * See also: enum vxge_debug_level{}. 2047 * See also: enum vxge_debug_level{}.
2014 */ 2048 */
2015
2016#define vxge_trace_aux(level, mask, fmt, ...) \
2017{\
2018 vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
2019}
2020
2021#define vxge_debug(module, level, mask, fmt, ...) { \
2022if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
2023 (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
2024 if ((mask & VXGE_DEBUG_MASK) == mask)\
2025 vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
2026} \
2027}
2028
2029#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK) 2049#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
2030#define vxge_debug_ll(level, mask, fmt, ...) \ 2050#define vxge_debug_ll(level, mask, fmt, ...) do { \
2031{\ 2051 if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
2032 vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\ 2052 (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
2033} 2053 if ((mask & VXGE_DEBUG_MASK) == mask) \
2034 2054 printk(fmt "\n", __VA_ARGS__); \
2055} while (0)
2035#else 2056#else
2036#define vxge_debug_ll(level, mask, fmt, ...) 2057#define vxge_debug_ll(level, mask, fmt, ...)
2037#endif 2058#endif
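
The rewritten vxge_debug_ll() collapses the old vxge_os_vaprintf()/vxge_trace_aux()/vxge_debug() chain (removed earlier in this file) into a single printk() wrapped in do { } while (0), so the macro expands to one statement and the snprintf() into a fixed 255-byte stack buffer goes away. The do/while wrapper matters for call sites like the following (condition and argument names invented for illustration):

	if (link_up)
		vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_MASK, "%s: link up", name);
	else
		vxge_debug_ll(VXGE_ERR, VXGE_DEBUG_MASK, "%s: link down", name);
	/* with do { ... } while (0) each expansion is one statement, so the
	 * else above still binds to its if even without braces */
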
@@ -2051,4 +2072,26 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
2051 2072
2052enum vxge_hw_status 2073enum vxge_hw_status
2053__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id); 2074__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
2075
2076#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
2077#define VXGE_HW_MAX_POLLING_COUNT 100
2078
2079void
2080vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
2081
2082enum vxge_hw_status
2083vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
2084 u32 *minor, u32 *build);
2085
2086enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
2087
2088enum vxge_hw_status
2089vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
2090 int size);
2091
2092enum vxge_hw_status
2093vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
2094 struct eprom_image *eprom_image_data);
2095
2096int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
2054#endif 2097#endif
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index b67746eef92..1dd3a21b3a4 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -11,7 +11,7 @@
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#include<linux/ethtool.h> 14#include <linux/ethtool.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
@@ -29,7 +29,6 @@
29 * Return value: 29 * Return value:
30 * 0 on success. 30 * 0 on success.
31 */ 31 */
32
33static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info) 32static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
34{ 33{
35 /* We currently only support 10Gb/FULL */ 34 /* We currently only support 10Gb/FULL */
@@ -79,10 +78,9 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
79 * Returns driver specefic information like name, version etc.. to ethtool. 78 * Returns driver specefic information like name, version etc.. to ethtool.
80 */ 79 */
81static void vxge_ethtool_gdrvinfo(struct net_device *dev, 80static void vxge_ethtool_gdrvinfo(struct net_device *dev,
82 struct ethtool_drvinfo *info) 81 struct ethtool_drvinfo *info)
83{ 82{
84 struct vxgedev *vdev; 83 struct vxgedev *vdev = netdev_priv(dev);
85 vdev = (struct vxgedev *)netdev_priv(dev);
86 strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME)); 84 strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
87 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION)); 85 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
88 strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN); 86 strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
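
This and the following ethtool/main-path hunks also drop the (struct vxgedev *) casts in front of netdev_priv(): the helper returns void *, so the assignment converts implicitly. The private area it returns is the one reserved when the net_device was allocated, roughly as below (allocation call shown for illustration, not copied from this driver):

	struct net_device *ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
						    no_of_vpath);
	struct vxgedev *vdev = netdev_priv(ndev);	/* no cast needed */

	vdev->ndev = ndev;	/* back-pointer, as the driver keeps in vdev->ndev */
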
@@ -104,15 +102,14 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
104 * buffer area. 102 * buffer area.
105 */ 103 */
106static void vxge_ethtool_gregs(struct net_device *dev, 104static void vxge_ethtool_gregs(struct net_device *dev,
107 struct ethtool_regs *regs, void *space) 105 struct ethtool_regs *regs, void *space)
108{ 106{
109 int index, offset; 107 int index, offset;
110 enum vxge_hw_status status; 108 enum vxge_hw_status status;
111 u64 reg; 109 u64 reg;
112 u64 *reg_space = (u64 *) space; 110 u64 *reg_space = (u64 *)space;
113 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 111 struct vxgedev *vdev = netdev_priv(dev);
114 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 112 struct __vxge_hw_device *hldev = vdev->devh;
115 pci_get_drvdata(vdev->pdev);
116 113
117 regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; 114 regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
118 regs->version = vdev->pdev->subsystem_device; 115 regs->version = vdev->pdev->subsystem_device;
@@ -147,9 +144,8 @@ static void vxge_ethtool_gregs(struct net_device *dev,
147 */ 144 */
148static int vxge_ethtool_idnic(struct net_device *dev, u32 data) 145static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
149{ 146{
150 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 147 struct vxgedev *vdev = netdev_priv(dev);
151 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 148 struct __vxge_hw_device *hldev = vdev->devh;
152 pci_get_drvdata(vdev->pdev);
153 149
154 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON); 150 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
155 msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME); 151 msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
@@ -168,11 +164,10 @@ static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
168 * void 164 * void
169 */ 165 */
170static void vxge_ethtool_getpause_data(struct net_device *dev, 166static void vxge_ethtool_getpause_data(struct net_device *dev,
171 struct ethtool_pauseparam *ep) 167 struct ethtool_pauseparam *ep)
172{ 168{
173 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 169 struct vxgedev *vdev = netdev_priv(dev);
174 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 170 struct __vxge_hw_device *hldev = vdev->devh;
175 pci_get_drvdata(vdev->pdev);
176 171
177 vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause); 172 vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
178} 173}
@@ -188,11 +183,10 @@ static void vxge_ethtool_getpause_data(struct net_device *dev,
188 * int, returns 0 on Success 183 * int, returns 0 on Success
189 */ 184 */
190static int vxge_ethtool_setpause_data(struct net_device *dev, 185static int vxge_ethtool_setpause_data(struct net_device *dev,
191 struct ethtool_pauseparam *ep) 186 struct ethtool_pauseparam *ep)
192{ 187{
193 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 188 struct vxgedev *vdev = netdev_priv(dev);
194 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 189 struct __vxge_hw_device *hldev = vdev->devh;
195 pci_get_drvdata(vdev->pdev);
196 190
197 vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause); 191 vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
198 192
@@ -209,9 +203,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
209 enum vxge_hw_status status; 203 enum vxge_hw_status status;
210 enum vxge_hw_status swstatus; 204 enum vxge_hw_status swstatus;
211 struct vxge_vpath *vpath = NULL; 205 struct vxge_vpath *vpath = NULL;
212 206 struct vxgedev *vdev = netdev_priv(dev);
213 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 207 struct __vxge_hw_device *hldev = vdev->devh;
214 struct __vxge_hw_device *hldev = vdev->devh;
215 struct vxge_hw_xmac_stats *xmac_stats; 208 struct vxge_hw_xmac_stats *xmac_stats;
216 struct vxge_hw_device_stats_sw_info *sw_stats; 209 struct vxge_hw_device_stats_sw_info *sw_stats;
217 struct vxge_hw_device_stats_hw_info *hw_stats; 210 struct vxge_hw_device_stats_hw_info *hw_stats;
@@ -574,12 +567,12 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
574 kfree(hw_stats); 567 kfree(hw_stats);
575} 568}
576 569
577static void vxge_ethtool_get_strings(struct net_device *dev, 570static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
578 u32 stringset, u8 *data) 571 u8 *data)
579{ 572{
580 int stat_size = 0; 573 int stat_size = 0;
581 int i, j; 574 int i, j;
582 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 575 struct vxgedev *vdev = netdev_priv(dev);
583 switch (stringset) { 576 switch (stringset) {
584 case ETH_SS_STATS: 577 case ETH_SS_STATS:
585 vxge_add_string("VPATH STATISTICS%s\t\t\t", 578 vxge_add_string("VPATH STATISTICS%s\t\t\t",
@@ -1066,21 +1059,21 @@ static void vxge_ethtool_get_strings(struct net_device *dev,
1066 1059
1067static int vxge_ethtool_get_regs_len(struct net_device *dev) 1060static int vxge_ethtool_get_regs_len(struct net_device *dev)
1068{ 1061{
1069 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1062 struct vxgedev *vdev = netdev_priv(dev);
1070 1063
1071 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; 1064 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
1072} 1065}
1073 1066
1074static u32 vxge_get_rx_csum(struct net_device *dev) 1067static u32 vxge_get_rx_csum(struct net_device *dev)
1075{ 1068{
1076 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1069 struct vxgedev *vdev = netdev_priv(dev);
1077 1070
1078 return vdev->rx_csum; 1071 return vdev->rx_csum;
1079} 1072}
1080 1073
1081static int vxge_set_rx_csum(struct net_device *dev, u32 data) 1074static int vxge_set_rx_csum(struct net_device *dev, u32 data)
1082{ 1075{
1083 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1076 struct vxgedev *vdev = netdev_priv(dev);
1084 1077
1085 if (data) 1078 if (data)
1086 vdev->rx_csum = 1; 1079 vdev->rx_csum = 1;
@@ -1102,7 +1095,7 @@ static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
1102 1095
1103static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) 1096static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1104{ 1097{
1105 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1098 struct vxgedev *vdev = netdev_priv(dev);
1106 1099
1107 switch (sset) { 1100 switch (sset) {
1108 case ETH_SS_STATS: 1101 case ETH_SS_STATS:
@@ -1119,6 +1112,59 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1119 } 1112 }
1120} 1113}
1121 1114
1115static int vxge_set_flags(struct net_device *dev, u32 data)
1116{
1117 struct vxgedev *vdev = netdev_priv(dev);
1118 enum vxge_hw_status status;
1119
1120 if (data & ~ETH_FLAG_RXHASH)
1121 return -EOPNOTSUPP;
1122
1123 if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
1124 return 0;
1125
1126 if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
1127 return -EINVAL;
1128
1129 vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
1130
1131 /* Enabling RTH requires some of the logic in vxge_device_register and a
1132 * vpath reset. Due to these restrictions, only allow modification
1133 * while the interface is down.
1134 */
1135 status = vxge_reset_all_vpaths(vdev);
1136 if (status != VXGE_HW_OK) {
1137 vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
1138 return -EFAULT;
1139 }
1140
1141 if (vdev->devh->config.rth_en)
1142 dev->features |= NETIF_F_RXHASH;
1143 else
1144 dev->features &= ~NETIF_F_RXHASH;
1145
1146 return 0;
1147}
1148
1149static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
1150{
1151 struct vxgedev *vdev = netdev_priv(dev);
1152
1153 if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
1154 printk(KERN_INFO "Single Function Mode is required to flash the"
1155 " firmware\n");
1156 return -EINVAL;
1157 }
1158
1159 if (netif_running(dev)) {
1160 printk(KERN_INFO "Interface %s must be down to flash the "
1161 "firmware\n", dev->name);
1162 return -EBUSY;
1163 }
1164
1165 return vxge_fw_upgrade(vdev, parms->data, 1);
1166}
1167
1122static const struct ethtool_ops vxge_ethtool_ops = { 1168static const struct ethtool_ops vxge_ethtool_ops = {
1123 .get_settings = vxge_ethtool_gset, 1169 .get_settings = vxge_ethtool_gset,
1124 .set_settings = vxge_ethtool_sset, 1170 .set_settings = vxge_ethtool_sset,
@@ -1131,7 +1177,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1131 .get_rx_csum = vxge_get_rx_csum, 1177 .get_rx_csum = vxge_get_rx_csum,
1132 .set_rx_csum = vxge_set_rx_csum, 1178 .set_rx_csum = vxge_set_rx_csum,
1133 .get_tx_csum = ethtool_op_get_tx_csum, 1179 .get_tx_csum = ethtool_op_get_tx_csum,
1134 .set_tx_csum = ethtool_op_set_tx_hw_csum, 1180 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1135 .get_sg = ethtool_op_get_sg, 1181 .get_sg = ethtool_op_get_sg,
1136 .set_sg = ethtool_op_set_sg, 1182 .set_sg = ethtool_op_set_sg,
1137 .get_tso = ethtool_op_get_tso, 1183 .get_tso = ethtool_op_get_tso,
@@ -1140,6 +1186,8 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1140 .phys_id = vxge_ethtool_idnic, 1186 .phys_id = vxge_ethtool_idnic,
1141 .get_sset_count = vxge_ethtool_get_sset_count, 1187 .get_sset_count = vxge_ethtool_get_sset_count,
1142 .get_ethtool_stats = vxge_get_ethtool_stats, 1188 .get_ethtool_stats = vxge_get_ethtool_stats,
1189 .set_flags = vxge_set_flags,
1190 .flash_device = vxge_fw_flash,
1143}; 1191};
1144 1192
1145void vxge_initialize_ethtool_ops(struct net_device *ndev) 1193void vxge_initialize_ethtool_ops(struct net_device *ndev)
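
The new .flash_device hook lets ethtool hand a firmware file name to vxge_fw_flash(), which forwards it to vxge_fw_upgrade() after checking for single-function mode and an interface that is down. The upgrade routine itself is outside this excerpt; a hedged sketch of what it presumably does with the name in parms->data, using the request_firmware() interface pulled in by the new <linux/firmware.h> include in vxge-main.c:

/* sketch only: error reporting trimmed; vxge_update_fw_image() is declared
 * in the vxge-config.h hunk above */
static int example_fw_upgrade(struct vxgedev *vdev, char *fw_name)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
	if (ret)
		return ret;

	/* stream the image to the adapter via the new firmware API */
	if (vxge_update_fw_image(vdev->devh, fw->data, fw->size) != VXGE_HW_OK)
		ret = -EINVAL;

	release_firmware(fw);
	return ret;
}
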
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 813829f3d02..4877b3b8a29 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -50,6 +50,8 @@
50#include <net/ip.h> 50#include <net/ip.h>
51#include <linux/netdevice.h> 51#include <linux/netdevice.h>
52#include <linux/etherdevice.h> 52#include <linux/etherdevice.h>
53#include <linux/firmware.h>
54#include <linux/net_tstamp.h>
53#include "vxge-main.h" 55#include "vxge-main.h"
54#include "vxge-reg.h" 56#include "vxge-reg.h"
55 57
@@ -90,7 +92,6 @@ static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
90static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac); 92static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
91static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath); 93static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
92static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath); 94static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
93static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
94 95
95static inline int is_vxge_card_up(struct vxgedev *vdev) 96static inline int is_vxge_card_up(struct vxgedev *vdev)
96{ 97{
@@ -152,7 +153,7 @@ static void
152vxge_callback_link_up(struct __vxge_hw_device *hldev) 153vxge_callback_link_up(struct __vxge_hw_device *hldev)
153{ 154{
154 struct net_device *dev = hldev->ndev; 155 struct net_device *dev = hldev->ndev;
155 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 156 struct vxgedev *vdev = netdev_priv(dev);
156 157
157 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 158 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
158 vdev->ndev->name, __func__, __LINE__); 159 vdev->ndev->name, __func__, __LINE__);
@@ -176,7 +177,7 @@ static void
176vxge_callback_link_down(struct __vxge_hw_device *hldev) 177vxge_callback_link_down(struct __vxge_hw_device *hldev)
177{ 178{
178 struct net_device *dev = hldev->ndev; 179 struct net_device *dev = hldev->ndev;
179 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 180 struct vxgedev *vdev = netdev_priv(dev);
180 181
181 vxge_debug_entryexit(VXGE_TRACE, 182 vxge_debug_entryexit(VXGE_TRACE,
182 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); 183 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
@@ -369,7 +370,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
369 u8 t_code, void *userdata) 370 u8 t_code, void *userdata)
370{ 371{
371 struct vxge_ring *ring = (struct vxge_ring *)userdata; 372 struct vxge_ring *ring = (struct vxge_ring *)userdata;
372 struct net_device *dev = ring->ndev; 373 struct net_device *dev = ring->ndev;
373 unsigned int dma_sizes; 374 unsigned int dma_sizes;
374 void *first_dtr = NULL; 375 void *first_dtr = NULL;
375 int dtr_cnt = 0; 376 int dtr_cnt = 0;
@@ -513,6 +514,23 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
513 else 514 else
514 skb_checksum_none_assert(skb); 515 skb_checksum_none_assert(skb);
515 516
517
518 if (ring->rx_hwts) {
519 struct skb_shared_hwtstamps *skb_hwts;
520 u32 ns = *(u32 *)(skb->head + pkt_length);
521
522 skb_hwts = skb_hwtstamps(skb);
523 skb_hwts->hwtstamp = ns_to_ktime(ns);
524 skb_hwts->syststamp.tv64 = 0;
525 }
526
527 /* rth_hash_type and rth_it_hit are non-zero regardless of
528 * whether rss is enabled. Only the rth_value is zero/non-zero
529 * if rss is disabled/enabled, so key off of that.
530 */
531 if (ext_info.rth_value)
532 skb->rxhash = ext_info.rth_value;
533
516 vxge_rx_complete(ring, skb, ext_info.vlan, 534 vxge_rx_complete(ring, skb, ext_info.vlan,
517 pkt_length, &ext_info); 535 pkt_length, &ext_info);
518 536
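
ring->rx_hwts gates the new receive-timestamp branch above, and the <linux/net_tstamp.h> include added in this patch suggests it is switched on through the standard SIOCSHWTSTAMP ioctl; that handler is not part of this excerpt. A hypothetical sketch of the ioctl side, for orientation only:

	/* inside a hypothetical ->ndo_do_ioctl(SIOCSHWTSTAMP) handler */
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* clear ring->rx_hwts on every ring */
		break;
	case HWTSTAMP_FILTER_ALL:
		/* set ring->rx_hwts so the completion path above stamps skbs */
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}
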
@@ -670,7 +688,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
670 struct vxge_vpath *vpath = NULL; 688 struct vxge_vpath *vpath = NULL;
671 struct __vxge_hw_device *hldev; 689 struct __vxge_hw_device *hldev;
672 690
673 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 691 hldev = pci_get_drvdata(vdev->pdev);
674 692
675 mac_address = (u8 *)&mac_addr; 693 mac_address = (u8 *)&mac_addr;
676 memcpy(mac_address, mac_header, ETH_ALEN); 694 memcpy(mac_address, mac_header, ETH_ALEN);
@@ -769,7 +787,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
769 return NETDEV_TX_OK; 787 return NETDEV_TX_OK;
770 } 788 }
771 789
772 vdev = (struct vxgedev *)netdev_priv(dev); 790 vdev = netdev_priv(dev);
773 791
774 if (unlikely(!is_vxge_card_up(vdev))) { 792 if (unlikely(!is_vxge_card_up(vdev))) {
775 vxge_debug_tx(VXGE_ERR, 793 vxge_debug_tx(VXGE_ERR,
@@ -1034,7 +1052,7 @@ static void vxge_set_multicast(struct net_device *dev)
1034 vxge_debug_entryexit(VXGE_TRACE, 1052 vxge_debug_entryexit(VXGE_TRACE,
1035 "%s:%d", __func__, __LINE__); 1053 "%s:%d", __func__, __LINE__);
1036 1054
1037 vdev = (struct vxgedev *)netdev_priv(dev); 1055 vdev = netdev_priv(dev);
1038 hldev = (struct __vxge_hw_device *)vdev->devh; 1056 hldev = (struct __vxge_hw_device *)vdev->devh;
1039 1057
1040 if (unlikely(!is_vxge_card_up(vdev))) 1058 if (unlikely(!is_vxge_card_up(vdev)))
@@ -1094,7 +1112,7 @@ static void vxge_set_multicast(struct net_device *dev)
1094 /* Delete previous MC's */ 1112 /* Delete previous MC's */
1095 for (i = 0; i < mcast_cnt; i++) { 1113 for (i = 0; i < mcast_cnt; i++) {
1096 list_for_each_safe(entry, next, list_head) { 1114 list_for_each_safe(entry, next, list_head) {
1097 mac_entry = (struct vxge_mac_addrs *) entry; 1115 mac_entry = (struct vxge_mac_addrs *)entry;
1098 /* Copy the mac address to delete */ 1116 /* Copy the mac address to delete */
1099 mac_address = (u8 *)&mac_entry->macaddr; 1117 mac_address = (u8 *)&mac_entry->macaddr;
1100 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1118 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1137,7 +1155,7 @@ _set_all_mcast:
1137 /* Delete previous MC's */ 1155 /* Delete previous MC's */
1138 for (i = 0; i < mcast_cnt; i++) { 1156 for (i = 0; i < mcast_cnt; i++) {
1139 list_for_each_safe(entry, next, list_head) { 1157 list_for_each_safe(entry, next, list_head) {
1140 mac_entry = (struct vxge_mac_addrs *) entry; 1158 mac_entry = (struct vxge_mac_addrs *)entry;
1141 /* Copy the mac address to delete */ 1159 /* Copy the mac address to delete */
1142 mac_address = (u8 *)&mac_entry->macaddr; 1160 mac_address = (u8 *)&mac_entry->macaddr;
1143 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1161 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1184,14 +1202,14 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
1184{ 1202{
1185 struct sockaddr *addr = p; 1203 struct sockaddr *addr = p;
1186 struct vxgedev *vdev; 1204 struct vxgedev *vdev;
1187 struct __vxge_hw_device *hldev; 1205 struct __vxge_hw_device *hldev;
1188 enum vxge_hw_status status = VXGE_HW_OK; 1206 enum vxge_hw_status status = VXGE_HW_OK;
1189 struct macInfo mac_info_new, mac_info_old; 1207 struct macInfo mac_info_new, mac_info_old;
1190 int vpath_idx = 0; 1208 int vpath_idx = 0;
1191 1209
1192 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1210 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1193 1211
1194 vdev = (struct vxgedev *)netdev_priv(dev); 1212 vdev = netdev_priv(dev);
1195 hldev = vdev->devh; 1213 hldev = vdev->devh;
1196 1214
1197 if (!is_valid_ether_addr(addr->sa_data)) 1215 if (!is_valid_ether_addr(addr->sa_data))
@@ -1292,8 +1310,13 @@ static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1292static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) 1310static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1293{ 1311{
1294 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1312 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1313 struct __vxge_hw_device *hldev;
1295 int msix_id; 1314 int msix_id;
1296 1315
1316 hldev = pci_get_drvdata(vdev->pdev);
1317
1318 vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
1319
1297 vxge_hw_vpath_intr_disable(vpath->handle); 1320 vxge_hw_vpath_intr_disable(vpath->handle);
1298 1321
1299 if (vdev->config.intr_type == INTA) 1322 if (vdev->config.intr_type == INTA)
@@ -1423,6 +1446,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1423 } 1446 }
1424 1447
1425 if (event == VXGE_LL_FULL_RESET) { 1448 if (event == VXGE_LL_FULL_RESET) {
1449 vxge_hw_device_wait_receive_idle(vdev->devh);
1426 vxge_hw_device_intr_disable(vdev->devh); 1450 vxge_hw_device_intr_disable(vdev->devh);
1427 1451
1428 switch (vdev->cric_err_event) { 1452 switch (vdev->cric_err_event) {
@@ -1608,8 +1632,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1608 int budget_org = budget; 1632 int budget_org = budget;
1609 struct vxge_ring *ring; 1633 struct vxge_ring *ring;
1610 1634
1611 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 1635 struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
1612 pci_get_drvdata(vdev->pdev);
1613 1636
1614 for (i = 0; i < vdev->no_of_vpath; i++) { 1637 for (i = 0; i < vdev->no_of_vpath; i++) {
1615 ring = &vdev->vpaths[i].ring; 1638 ring = &vdev->vpaths[i].ring;
@@ -1645,11 +1668,11 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1645 */ 1668 */
1646static void vxge_netpoll(struct net_device *dev) 1669static void vxge_netpoll(struct net_device *dev)
1647{ 1670{
1648 struct __vxge_hw_device *hldev; 1671 struct __vxge_hw_device *hldev;
1649 struct vxgedev *vdev; 1672 struct vxgedev *vdev;
1650 1673
1651 vdev = (struct vxgedev *)netdev_priv(dev); 1674 vdev = netdev_priv(dev);
1652 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); 1675 hldev = pci_get_drvdata(vdev->pdev);
1653 1676
1654 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1677 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1655 1678
@@ -1689,15 +1712,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1689 mtable[index] = index % vdev->no_of_vpath; 1712 mtable[index] = index % vdev->no_of_vpath;
1690 } 1713 }
1691 1714
1692 /* Fill RTH hash types */
1693 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1694 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1695 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1696 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1697 hash_types.hash_type_tcpipv6ex_en =
1698 vdev->config.rth_hash_type_tcpipv6ex;
1699 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1700
1701 /* set indirection table, bucket-to-vpath mapping */ 1715 /* set indirection table, bucket-to-vpath mapping */
1702 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, 1716 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1703 vdev->no_of_vpath, 1717 vdev->no_of_vpath,
@@ -1710,12 +1724,21 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1710 return status; 1724 return status;
1711 } 1725 }
1712 1726
1727 /* Fill RTH hash types */
1728 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1729 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1730 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1731 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1732 hash_types.hash_type_tcpipv6ex_en =
1733 vdev->config.rth_hash_type_tcpipv6ex;
1734 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1735
1713 /* 1736 /*
1714 * Because the itable_set() method uses the active_table field 1737 * Because the itable_set() method uses the active_table field
1715 * for the target virtual path the RTH config should be updated 1738 * for the target virtual path the RTH config should be updated
1716 * for all VPATHs. The h/w only uses the lowest numbered VPATH 1739 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1717 * when steering frames. 1740 * when steering frames.
1718 */ 1741 */
1719 for (index = 0; index < vdev->no_of_vpath; index++) { 1742 for (index = 0; index < vdev->no_of_vpath; index++) {
1720 status = vxge_hw_vpath_rts_rth_set( 1743 status = vxge_hw_vpath_rts_rth_set(
1721 vdev->vpaths[index].handle, 1744 vdev->vpaths[index].handle,
@@ -1797,7 +1820,7 @@ static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1797{ 1820{
1798 struct list_head *entry, *next; 1821 struct list_head *entry, *next;
1799 u64 del_mac = 0; 1822 u64 del_mac = 0;
1800 u8 *mac_address = (u8 *) (&del_mac); 1823 u8 *mac_address = (u8 *)(&del_mac);
1801 1824
1802 /* Copy the mac address to delete from the list */ 1825 /* Copy the mac address to delete from the list */
1803 memcpy(mac_address, mac->macaddr, ETH_ALEN); 1826 memcpy(mac_address, mac->macaddr, ETH_ALEN);
@@ -1928,7 +1951,7 @@ static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1928} 1951}
1929 1952
1930/* reset vpaths */ 1953/* reset vpaths */
1931static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) 1954enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1932{ 1955{
1933 enum vxge_hw_status status = VXGE_HW_OK; 1956 enum vxge_hw_status status = VXGE_HW_OK;
1934 struct vxge_vpath *vpath; 1957 struct vxge_vpath *vpath;
@@ -1988,8 +2011,23 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
1988 2011
1989 for (i = 0; i < vdev->no_of_vpath; i++) { 2012 for (i = 0; i < vdev->no_of_vpath; i++) {
1990 vpath = &vdev->vpaths[i]; 2013 vpath = &vdev->vpaths[i];
1991
1992 vxge_assert(vpath->is_configured); 2014 vxge_assert(vpath->is_configured);
2015
2016 if (!vdev->titan1) {
2017 struct vxge_hw_vp_config *vcfg;
2018 vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2019
2020 vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
2021 vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
2022 vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
2023 vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
2024 vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
2025 vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2026 vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2027 vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
2028 vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
2029 }
2030
1993 attr.vp_id = vpath->device_id; 2031 attr.vp_id = vpath->device_id;
1994 attr.fifo_attr.callback = vxge_xmit_compl; 2032 attr.fifo_attr.callback = vxge_xmit_compl;
1995 attr.fifo_attr.txdl_term = vxge_tx_term; 2033 attr.fifo_attr.txdl_term = vxge_tx_term;
@@ -2024,6 +2062,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2024 vdev->config.fifo_indicate_max_pkts; 2062 vdev->config.fifo_indicate_max_pkts;
2025 vpath->ring.rx_vector_no = 0; 2063 vpath->ring.rx_vector_no = 0;
2026 vpath->ring.rx_csum = vdev->rx_csum; 2064 vpath->ring.rx_csum = vdev->rx_csum;
2065 vpath->ring.rx_hwts = vdev->rx_hwts;
2027 vpath->is_open = 1; 2066 vpath->is_open = 1;
2028 vdev->vp_handles[i] = vpath->handle; 2067 vdev->vp_handles[i] = vpath->handle;
2029 vpath->ring.gro_enable = vdev->config.gro_enable; 2068 vpath->ring.gro_enable = vdev->config.gro_enable;
@@ -2062,18 +2101,18 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2062 struct __vxge_hw_device *hldev; 2101 struct __vxge_hw_device *hldev;
2063 u64 reason; 2102 u64 reason;
2064 enum vxge_hw_status status; 2103 enum vxge_hw_status status;
2065 struct vxgedev *vdev = (struct vxgedev *) dev_id;; 2104 struct vxgedev *vdev = (struct vxgedev *)dev_id;
2066 2105
2067 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); 2106 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2068 2107
2069 dev = vdev->ndev; 2108 dev = vdev->ndev;
2070 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); 2109 hldev = pci_get_drvdata(vdev->pdev);
2071 2110
2072 if (pci_channel_offline(vdev->pdev)) 2111 if (pci_channel_offline(vdev->pdev))
2073 return IRQ_NONE; 2112 return IRQ_NONE;
2074 2113
2075 if (unlikely(!is_vxge_card_up(vdev))) 2114 if (unlikely(!is_vxge_card_up(vdev)))
2076 return IRQ_NONE; 2115 return IRQ_HANDLED;
2077 2116
2078 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, 2117 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
2079 &reason); 2118 &reason);
@@ -2301,8 +2340,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
2301 2340
2302static void vxge_rem_isr(struct vxgedev *vdev) 2341static void vxge_rem_isr(struct vxgedev *vdev)
2303{ 2342{
2304 struct __vxge_hw_device *hldev; 2343 struct __vxge_hw_device *hldev;
2305 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2344 hldev = pci_get_drvdata(vdev->pdev);
2306 2345
2307#ifdef CONFIG_PCI_MSI 2346#ifdef CONFIG_PCI_MSI
2308 if (vdev->config.intr_type == MSI_X) { 2347 if (vdev->config.intr_type == MSI_X) {
@@ -2542,8 +2581,8 @@ vxge_open(struct net_device *dev)
2542 vxge_debug_entryexit(VXGE_TRACE, 2581 vxge_debug_entryexit(VXGE_TRACE,
2543 "%s: %s:%d", dev->name, __func__, __LINE__); 2582 "%s: %s:%d", dev->name, __func__, __LINE__);
2544 2583
2545 vdev = (struct vxgedev *)netdev_priv(dev); 2584 vdev = netdev_priv(dev);
2546 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2585 hldev = pci_get_drvdata(vdev->pdev);
2547 function_mode = vdev->config.device_hw_info.function_mode; 2586 function_mode = vdev->config.device_hw_info.function_mode;
2548 2587
2549 /* make sure you have link off by default every time Nic is 2588 /* make sure you have link off by default every time Nic is
@@ -2598,6 +2637,8 @@ vxge_open(struct net_device *dev)
2598 goto out2; 2637 goto out2;
2599 } 2638 }
2600 } 2639 }
2640 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2641 hldev->config.rth_en ? "enabled" : "disabled");
2601 2642
2602 for (i = 0; i < vdev->no_of_vpath; i++) { 2643 for (i = 0; i < vdev->no_of_vpath; i++) {
2603 vpath = &vdev->vpaths[i]; 2644 vpath = &vdev->vpaths[i];
@@ -2683,9 +2724,10 @@ vxge_open(struct net_device *dev)
2683 vxge_os_timer(vdev->vp_reset_timer, 2724 vxge_os_timer(vdev->vp_reset_timer,
2684 vxge_poll_vp_reset, vdev, (HZ/2)); 2725 vxge_poll_vp_reset, vdev, (HZ/2));
2685 2726
2686 if (vdev->vp_lockup_timer.function == NULL) 2727 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2687 vxge_os_timer(vdev->vp_lockup_timer, 2728 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2688 vxge_poll_vp_lockup, vdev, (HZ/2)); 2729 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2730 HZ / 2);
2689 2731
2690 set_bit(__VXGE_STATE_CARD_UP, &vdev->state); 2732 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2691 2733
@@ -2767,8 +2809,8 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2767 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 2809 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2768 dev->name, __func__, __LINE__); 2810 dev->name, __func__, __LINE__);
2769 2811
2770 vdev = (struct vxgedev *)netdev_priv(dev); 2812 vdev = netdev_priv(dev);
2771 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2813 hldev = pci_get_drvdata(vdev->pdev);
2772 2814
2773 if (unlikely(!is_vxge_card_up(vdev))) 2815 if (unlikely(!is_vxge_card_up(vdev)))
2774 return 0; 2816 return 0;
@@ -2778,7 +2820,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2778 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) 2820 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2779 msleep(50); 2821 msleep(50);
2780 2822
2781 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2782 if (do_io) { 2823 if (do_io) {
2783 /* Put the vpath back in normal mode */ 2824 /* Put the vpath back in normal mode */
2784 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id); 2825 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2818,10 +2859,17 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2818 2859
2819 smp_wmb(); 2860 smp_wmb();
2820 } 2861 }
2821 del_timer_sync(&vdev->vp_lockup_timer); 2862
2863 if (vdev->titan1)
2864 del_timer_sync(&vdev->vp_lockup_timer);
2822 2865
2823 del_timer_sync(&vdev->vp_reset_timer); 2866 del_timer_sync(&vdev->vp_reset_timer);
2824 2867
2868 if (do_io)
2869 vxge_hw_device_wait_receive_idle(hldev);
2870
2871 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2872
2825 /* Disable napi */ 2873 /* Disable napi */
2826 if (vdev->config.intr_type != MSI_X) 2874 if (vdev->config.intr_type != MSI_X)
2827 napi_disable(&vdev->napi); 2875 napi_disable(&vdev->napi);
@@ -2838,8 +2886,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2838 if (do_io) 2886 if (do_io)
2839 vxge_hw_device_intr_disable(vdev->devh); 2887 vxge_hw_device_intr_disable(vdev->devh);
2840 2888
2841 mdelay(1000);
2842
2843 vxge_rem_isr(vdev); 2889 vxge_rem_isr(vdev);
2844 2890
2845 vxge_napi_del_all(vdev); 2891 vxge_napi_del_all(vdev);
@@ -2954,6 +3000,101 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2954 return net_stats; 3000 return net_stats;
2955} 3001}
2956 3002
3003static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
3004 int enable)
3005{
3006 enum vxge_hw_status status;
3007 u64 val64;
3008
3009 /* Timestamp is passed to the driver via the FCS, therefore we
3010 * must disable the FCS stripping by the adapter. Since this is
3011 * required for the driver to load (due to a hardware bug),
3012 * there is no need to do anything special here.
3013 */
3014 if (enable)
3015 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3016 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3017 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3018 else
3019 val64 = 0;
3020
3021 status = vxge_hw_mgmt_reg_write(vdev->devh,
3022 vxge_hw_mgmt_reg_type_mrpcim,
3023 0,
3024 offsetof(struct vxge_hw_mrpcim_reg,
3025 xmac_timestamp),
3026 val64);
3027 vxge_hw_device_flush_io(vdev->devh);
3028 return status;
3029}
3030
3031static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3032{
3033 struct hwtstamp_config config;
3034 enum vxge_hw_status status;
3035 int i;
3036
3037 if (copy_from_user(&config, data, sizeof(config)))
3038 return -EFAULT;
3039
3040 /* reserved for future extensions */
3041 if (config.flags)
3042 return -EINVAL;
3043
3044 /* Transmit HW Timestamp not supported */
3045 switch (config.tx_type) {
3046 case HWTSTAMP_TX_OFF:
3047 break;
3048 case HWTSTAMP_TX_ON:
3049 default:
3050 return -ERANGE;
3051 }
3052
3053 switch (config.rx_filter) {
3054 case HWTSTAMP_FILTER_NONE:
3055 status = vxge_timestamp_config(vdev, 0);
3056 if (status != VXGE_HW_OK)
3057 return -EFAULT;
3058
3059 vdev->rx_hwts = 0;
3060 config.rx_filter = HWTSTAMP_FILTER_NONE;
3061 break;
3062
3063 case HWTSTAMP_FILTER_ALL:
3064 case HWTSTAMP_FILTER_SOME:
3065 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3066 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3067 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3068 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3069 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3070 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3071 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3072 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3073 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3074 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3075 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3076 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3077 status = vxge_timestamp_config(vdev, 1);
3078 if (status != VXGE_HW_OK)
3079 return -EFAULT;
3080
3081 vdev->rx_hwts = 1;
3082 config.rx_filter = HWTSTAMP_FILTER_ALL;
3083 break;
3084
3085 default:
3086 return -ERANGE;
3087 }
3088
3089 for (i = 0; i < vdev->no_of_vpath; i++)
3090 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3091
3092 if (copy_to_user(data, &config, sizeof(config)))
3093 return -EFAULT;
3094
3095 return 0;
3096}
3097
2957/** 3098/**
2958 * vxge_ioctl 3099 * vxge_ioctl
2959 * @dev: Device pointer. 3100 * @dev: Device pointer.
@@ -2966,7 +3107,20 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2966 */ 3107 */
2967static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3108static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2968{ 3109{
2969 return -EOPNOTSUPP; 3110 struct vxgedev *vdev = netdev_priv(dev);
3111 int ret;
3112
3113 switch (cmd) {
3114 case SIOCSHWTSTAMP:
3115 ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
3116 if (ret)
3117 return ret;
3118 break;
3119 default:
3120 return -EOPNOTSUPP;
3121 }
3122
3123 return 0;
2970} 3124}
2971 3125
2972/** 3126/**
@@ -2984,7 +3138,7 @@ vxge_tx_watchdog(struct net_device *dev)
2984 3138
2985 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3139 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2986 3140
2987 vdev = (struct vxgedev *)netdev_priv(dev); 3141 vdev = netdev_priv(dev);
2988 3142
2989 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; 3143 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
2990 3144
@@ -3012,7 +3166,7 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3012 3166
3013 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3167 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3014 3168
3015 vdev = (struct vxgedev *)netdev_priv(dev); 3169 vdev = netdev_priv(dev);
3016 3170
3017 vpath = &vdev->vpaths[0]; 3171 vpath = &vdev->vpaths[0];
3018 if ((NULL == grp) && (vpath->is_open)) { 3172 if ((NULL == grp) && (vpath->is_open)) {
@@ -3061,7 +3215,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3061 struct vxge_vpath *vpath; 3215 struct vxge_vpath *vpath;
3062 int vp_id; 3216 int vp_id;
3063 3217
3064 vdev = (struct vxgedev *)netdev_priv(dev); 3218 vdev = netdev_priv(dev);
3065 3219
3066 /* Add these vlan to the vid table */ 3220 /* Add these vlan to the vid table */
3067 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 3221 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
@@ -3088,7 +3242,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3088 3242
3089 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3243 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3090 3244
3091 vdev = (struct vxgedev *)netdev_priv(dev); 3245 vdev = netdev_priv(dev);
3092 3246
3093 vlan_group_set_device(vdev->vlgrp, vid, NULL); 3247 vlan_group_set_device(vdev->vlgrp, vid, NULL);
3094 3248
@@ -3125,6 +3279,19 @@ static const struct net_device_ops vxge_netdev_ops = {
3125#endif 3279#endif
3126}; 3280};
3127 3281
3282static int __devinit vxge_device_revision(struct vxgedev *vdev)
3283{
3284 int ret;
3285 u8 revision;
3286
3287 ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
3288 if (ret)
3289 return -EIO;
3290
3291 vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
3292 return 0;
3293}
3294
3128static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3295static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3129 struct vxge_config *config, 3296 struct vxge_config *config,
3130 int high_dma, int no_of_vpath, 3297 int high_dma, int no_of_vpath,
@@ -3163,6 +3330,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3163 vdev->pdev = hldev->pdev; 3330 vdev->pdev = hldev->pdev;
3164 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3331 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3165 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ 3332 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3333 vdev->rx_hwts = 0;
3334
3335 ret = vxge_device_revision(vdev);
3336 if (ret < 0)
3337 goto _out1;
3166 3338
3167 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3339 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3168 3340
@@ -3178,6 +3350,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3178 3350
3179 vxge_initialize_ethtool_ops(ndev); 3351 vxge_initialize_ethtool_ops(ndev);
3180 3352
3353 if (vdev->config.rth_steering != NO_STEERING) {
3354 ndev->features |= NETIF_F_RXHASH;
3355 hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
3356 }
3357
3181 /* Allocate memory for vpath */ 3358 /* Allocate memory for vpath */
3182 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * 3359 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3183 no_of_vpath, GFP_KERNEL); 3360 no_of_vpath, GFP_KERNEL);
@@ -3191,7 +3368,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3191 3368
3192 ndev->features |= NETIF_F_SG; 3369 ndev->features |= NETIF_F_SG;
3193 3370
3194 ndev->features |= NETIF_F_HW_CSUM; 3371 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3195 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3372 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3196 "%s : checksuming enabled", __func__); 3373 "%s : checksuming enabled", __func__);
3197 3374
@@ -3227,6 +3404,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3227 "%s: Ethernet device registered", 3404 "%s: Ethernet device registered",
3228 ndev->name); 3405 ndev->name);
3229 3406
3407 hldev->ndev = ndev;
3230 *vdev_out = vdev; 3408 *vdev_out = vdev;
3231 3409
3232 /* Resetting the Device stats */ 3410 /* Resetting the Device stats */
@@ -3261,36 +3439,29 @@ _out0:
3261 * 3439 *
3262 * This function will unregister and free network device 3440 * This function will unregister and free network device
3263 */ 3441 */
3264static void 3442static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3265vxge_device_unregister(struct __vxge_hw_device *hldev)
3266{ 3443{
3267 struct vxgedev *vdev; 3444 struct vxgedev *vdev;
3268 struct net_device *dev; 3445 struct net_device *dev;
3269 char buf[IFNAMSIZ]; 3446 char buf[IFNAMSIZ];
3270#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3271 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3272 u32 level_trace;
3273#endif
3274 3447
3275 dev = hldev->ndev; 3448 dev = hldev->ndev;
3276 vdev = netdev_priv(dev); 3449 vdev = netdev_priv(dev);
3277#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3278 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3279 level_trace = vdev->level_trace;
3280#endif
3281 vxge_debug_entryexit(level_trace,
3282 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3283 3450
3284 memcpy(buf, vdev->ndev->name, IFNAMSIZ); 3451 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3452 __func__, __LINE__);
3453
3454 strncpy(buf, dev->name, IFNAMSIZ);
3285 3455
3286 /* in 2.6 will call stop() if device is up */ 3456 /* in 2.6 will call stop() if device is up */
3287 unregister_netdev(dev); 3457 unregister_netdev(dev);
3288 3458
3289 flush_scheduled_work(); 3459 flush_scheduled_work();
3290 3460
3291 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf); 3461 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3292 vxge_debug_entryexit(level_trace, 3462 buf);
3293 "%s: %s:%d Exiting...", buf, __func__, __LINE__); 3463 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3464 __func__, __LINE__);
3294} 3465}
3295 3466
3296/* 3467/*
@@ -3304,7 +3475,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3304 enum vxge_hw_event type, u64 vp_id) 3475 enum vxge_hw_event type, u64 vp_id)
3305{ 3476{
3306 struct net_device *dev = hldev->ndev; 3477 struct net_device *dev = hldev->ndev;
3307 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 3478 struct vxgedev *vdev = netdev_priv(dev);
3308 struct vxge_vpath *vpath = NULL; 3479 struct vxge_vpath *vpath = NULL;
3309 int vpath_idx; 3480 int vpath_idx;
3310 3481
@@ -3751,9 +3922,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3751 vxge_debug_init(VXGE_TRACE, 3922 vxge_debug_init(VXGE_TRACE,
3752 "%s: MAC Address learning enabled", vdev->ndev->name); 3923 "%s: MAC Address learning enabled", vdev->ndev->name);
3753 3924
3754 vxge_debug_init(VXGE_TRACE,
3755 "%s: Rx doorbell mode enabled", vdev->ndev->name);
3756
3757 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3925 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3758 if (!vxge_bVALn(vpath_mask, i, 1)) 3926 if (!vxge_bVALn(vpath_mask, i, 1))
3759 continue; 3927 continue;
@@ -3766,14 +3934,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3766 ((struct __vxge_hw_device *)(vdev->devh))-> 3934 ((struct __vxge_hw_device *)(vdev->devh))->
3767 config.vp_config[i].rpa_strip_vlan_tag 3935 config.vp_config[i].rpa_strip_vlan_tag
3768 ? "Enabled" : "Disabled"); 3936 ? "Enabled" : "Disabled");
3769 vxge_debug_init(VXGE_TRACE,
3770 "%s: Ring blocks : %d", vdev->ndev->name,
3771 ((struct __vxge_hw_device *)(vdev->devh))->
3772 config.vp_config[i].ring.ring_blocks);
3773 vxge_debug_init(VXGE_TRACE,
3774 "%s: Fifo blocks : %d", vdev->ndev->name,
3775 ((struct __vxge_hw_device *)(vdev->devh))->
3776 config.vp_config[i].fifo.fifo_blocks);
3777 vxge_debug_ll_config(VXGE_TRACE, 3937 vxge_debug_ll_config(VXGE_TRACE,
3778 "%s: Max frags : %d", vdev->ndev->name, 3938 "%s: Max frags : %d", vdev->ndev->name,
3779 ((struct __vxge_hw_device *)(vdev->devh))-> 3939 ((struct __vxge_hw_device *)(vdev->devh))->
@@ -3813,8 +3973,7 @@ static int vxge_pm_resume(struct pci_dev *pdev)
3813static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, 3973static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3814 pci_channel_state_t state) 3974 pci_channel_state_t state)
3815{ 3975{
3816 struct __vxge_hw_device *hldev = 3976 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3817 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3818 struct net_device *netdev = hldev->ndev; 3977 struct net_device *netdev = hldev->ndev;
3819 3978
3820 netif_device_detach(netdev); 3979 netif_device_detach(netdev);
@@ -3843,8 +4002,7 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3843 */ 4002 */
3844static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) 4003static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3845{ 4004{
3846 struct __vxge_hw_device *hldev = 4005 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3847 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3848 struct net_device *netdev = hldev->ndev; 4006 struct net_device *netdev = hldev->ndev;
3849 4007
3850 struct vxgedev *vdev = netdev_priv(netdev); 4008 struct vxgedev *vdev = netdev_priv(netdev);
@@ -3869,8 +4027,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3869 */ 4027 */
3870static void vxge_io_resume(struct pci_dev *pdev) 4028static void vxge_io_resume(struct pci_dev *pdev)
3871{ 4029{
3872 struct __vxge_hw_device *hldev = 4030 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3873 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3874 struct net_device *netdev = hldev->ndev; 4031 struct net_device *netdev = hldev->ndev;
3875 4032
3876 if (netif_running(netdev)) { 4033 if (netif_running(netdev)) {
@@ -3914,6 +4071,142 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
3914 return num_functions; 4071 return num_functions;
3915} 4072}
3916 4073
4074int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4075{
4076 struct __vxge_hw_device *hldev = vdev->devh;
4077 u32 maj, min, bld, cmaj, cmin, cbld;
4078 enum vxge_hw_status status;
4079 const struct firmware *fw;
4080 int ret;
4081
4082 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4083 if (ret) {
4084 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4085 VXGE_DRIVER_NAME, fw_name);
4086 goto out;
4087 }
4088
4089 /* Load the new firmware onto the adapter */
4090 status = vxge_update_fw_image(hldev, fw->data, fw->size);
4091 if (status != VXGE_HW_OK) {
4092 vxge_debug_init(VXGE_ERR,
4093 "%s: FW image download to adapter failed '%s'.",
4094 VXGE_DRIVER_NAME, fw_name);
4095 ret = -EIO;
4096 goto out;
4097 }
4098
4099 /* Read the version of the new firmware */
4100 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4101 if (status != VXGE_HW_OK) {
4102 vxge_debug_init(VXGE_ERR,
4103 "%s: Upgrade read version failed '%s'.",
4104 VXGE_DRIVER_NAME, fw_name);
4105 ret = -EIO;
4106 goto out;
4107 }
4108
4109 cmaj = vdev->config.device_hw_info.fw_version.major;
4110 cmin = vdev->config.device_hw_info.fw_version.minor;
4111 cbld = vdev->config.device_hw_info.fw_version.build;
4112 /* It's possible the version in /lib/firmware is not the latest version.
4113 * If so, we could get into a loop of trying to upgrade to the latest
4114 * and flashing the older version.
4115 */
4116 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4117 !override) {
4118 ret = -EINVAL;
4119 goto out;
4120 }
4121
4122 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4123 maj, min, bld);
4124
4125 /* Flash the adapter with the new firmware */
4126 status = vxge_hw_flash_fw(hldev);
4127 if (status != VXGE_HW_OK) {
4128 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4129 VXGE_DRIVER_NAME, fw_name);
4130 ret = -EIO;
4131 goto out;
4132 }
4133
4134 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4135 "hard reset before using, thus requiring a system reboot or a "
4136 "hotplug event.\n");
4137
4138out:
4139 return ret;
4140}
4141
4142static int vxge_probe_fw_update(struct vxgedev *vdev)
4143{
4144 u32 maj, min, bld;
4145 int ret, gpxe = 0;
4146 char *fw_name;
4147
4148 maj = vdev->config.device_hw_info.fw_version.major;
4149 min = vdev->config.device_hw_info.fw_version.minor;
4150 bld = vdev->config.device_hw_info.fw_version.build;
4151
4152 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4153 return 0;
4154
4155 /* Ignore the build number when determining if the current firmware is
4156 * "too new" to load the driver
4157 */
4158 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4159 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4160 "version, unable to load driver\n",
4161 VXGE_DRIVER_NAME);
4162 return -EINVAL;
4163 }
4164
4165 /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4166 * work with this driver.
4167 */
4168 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4169 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4170 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4171 return -EINVAL;
4172 }
4173
4174 /* If file not specified, determine gPXE or not */
4175 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4176 int i;
4177 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4178 if (vdev->devh->eprom_versions[i]) {
4179 gpxe = 1;
4180 break;
4181 }
4182 }
4183 if (gpxe)
4184 fw_name = "vxge/X3fw-pxe.ncf";
4185 else
4186 fw_name = "vxge/X3fw.ncf";
4187
4188 ret = vxge_fw_upgrade(vdev, fw_name, 0);
4189 /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
4190 * probe, so ignore them
4191 */
4192 if (ret != -EINVAL && ret != -ENOENT)
4193 return -EIO;
4194 else
4195 ret = 0;
4196
4197 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4198 VXGE_FW_VER(maj, min, 0)) {
4199 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4200 " be used with this driver.\n"
4201 "Please get the latest version from "
4202 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4203 VXGE_DRIVER_NAME, maj, min, bld);
4204 return -EINVAL;
4205 }
4206
4207 return ret;
4208}
4209
3917/** 4210/**
3918 * vxge_probe 4211 * vxge_probe
3919 * @pdev : structure containing the PCI related information of the device. 4212 * @pdev : structure containing the PCI related information of the device.
@@ -3928,7 +4221,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
3928static int __devinit 4221static int __devinit
3929vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) 4222vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3930{ 4223{
3931 struct __vxge_hw_device *hldev; 4224 struct __vxge_hw_device *hldev;
3932 enum vxge_hw_status status; 4225 enum vxge_hw_status status;
3933 int ret; 4226 int ret;
3934 int high_dma = 0; 4227 int high_dma = 0;
@@ -4072,16 +4365,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4072 goto _exit3; 4365 goto _exit3;
4073 } 4366 }
4074 4367
4075 if (ll_config->device_hw_info.fw_version.major !=
4076 VXGE_DRIVER_FW_VERSION_MAJOR) {
4077 vxge_debug_init(VXGE_ERR,
4078 "%s: Incorrect firmware version."
4079 "Please upgrade the firmware to version 1.x.x",
4080 VXGE_DRIVER_NAME);
4081 ret = -EINVAL;
4082 goto _exit3;
4083 }
4084
4085 vpath_mask = ll_config->device_hw_info.vpath_mask; 4368 vpath_mask = ll_config->device_hw_info.vpath_mask;
4086 if (vpath_mask == 0) { 4369 if (vpath_mask == 0) {
4087 vxge_debug_ll_config(VXGE_TRACE, 4370 vxge_debug_ll_config(VXGE_TRACE,
@@ -4145,11 +4428,37 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4145 goto _exit3; 4428 goto _exit3;
4146 } 4429 }
4147 4430
4431 if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4432 ll_config->device_hw_info.fw_version.minor,
4433 ll_config->device_hw_info.fw_version.build) >=
4434 VXGE_EPROM_FW_VER) {
4435 struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4436
4437 status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4438 if (status != VXGE_HW_OK) {
4439 vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4440 VXGE_DRIVER_NAME);
4441 /* This is a non-fatal error, continue */
4442 }
4443
4444 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4445 hldev->eprom_versions[i] = img[i].version;
4446 if (!img[i].is_valid)
4447 break;
4448 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4449 "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
4450 VXGE_EPROM_IMG_MAJOR(img[i].version),
4451 VXGE_EPROM_IMG_MINOR(img[i].version),
4452 VXGE_EPROM_IMG_FIX(img[i].version),
4453 VXGE_EPROM_IMG_BUILD(img[i].version));
4454 }
4455 }
4456
4148 /* if FCS stripping is not disabled in MAC fail driver load */ 4457 /* if FCS stripping is not disabled in MAC fail driver load */
4149 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) { 4458 status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
4150 vxge_debug_init(VXGE_ERR, 4459 if (status != VXGE_HW_OK) {
4151 "%s: FCS stripping is not disabled in MAC" 4460 vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
4152 " failing driver load", VXGE_DRIVER_NAME); 4461 " failing driver load", VXGE_DRIVER_NAME);
4153 ret = -EINVAL; 4462 ret = -EINVAL;
4154 goto _exit4; 4463 goto _exit4;
4155 } 4464 }
@@ -4163,28 +4472,32 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4163 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; 4472 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4164 ll_config->addr_learn_en = addr_learn_en; 4473 ll_config->addr_learn_en = addr_learn_en;
4165 ll_config->rth_algorithm = RTH_ALG_JENKINS; 4474 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4166 ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; 4475 ll_config->rth_hash_type_tcpipv4 = 1;
4167 ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; 4476 ll_config->rth_hash_type_ipv4 = 0;
4168 ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4477 ll_config->rth_hash_type_tcpipv6 = 0;
4169 ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4478 ll_config->rth_hash_type_ipv6 = 0;
4170 ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4479 ll_config->rth_hash_type_tcpipv6ex = 0;
4171 ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4480 ll_config->rth_hash_type_ipv6ex = 0;
4172 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; 4481 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4173 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4482 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4174 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4483 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4175 4484
4176 if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, 4485 ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4177 &vdev)) { 4486 &vdev);
4487 if (ret) {
4178 ret = -EINVAL; 4488 ret = -EINVAL;
4179 goto _exit4; 4489 goto _exit4;
4180 } 4490 }
4181 4491
4492 ret = vxge_probe_fw_update(vdev);
4493 if (ret)
4494 goto _exit5;
4495
4182 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); 4496 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4183 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), 4497 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4184 vxge_hw_device_trace_level_get(hldev)); 4498 vxge_hw_device_trace_level_get(hldev));
4185 4499
4186 /* set private HW device info */ 4500 /* set private HW device info */
4187 hldev->ndev = vdev->ndev;
4188 vdev->mtu = VXGE_HW_DEFAULT_MTU; 4501 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4189 vdev->bar0 = attr.bar0; 4502 vdev->bar0 = attr.bar0;
4190 vdev->max_vpath_supported = max_vpath_supported; 4503 vdev->max_vpath_supported = max_vpath_supported;
@@ -4278,15 +4591,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4278 4591
4279 /* Copy the station mac address to the list */ 4592 /* Copy the station mac address to the list */
4280 for (i = 0; i < vdev->no_of_vpath; i++) { 4593 for (i = 0; i < vdev->no_of_vpath; i++) {
4281 entry = (struct vxge_mac_addrs *) 4594 entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
4282 kzalloc(sizeof(struct vxge_mac_addrs),
4283 GFP_KERNEL);
4284 if (NULL == entry) { 4595 if (NULL == entry) {
4285 vxge_debug_init(VXGE_ERR, 4596 vxge_debug_init(VXGE_ERR,
4286 "%s: mac_addr_list : memory allocation failed", 4597 "%s: mac_addr_list : memory allocation failed",
4287 vdev->ndev->name); 4598 vdev->ndev->name);
4288 ret = -EPERM; 4599 ret = -EPERM;
4289 goto _exit5; 4600 goto _exit6;
4290 } 4601 }
4291 macaddr = (u8 *)&entry->macaddr; 4602 macaddr = (u8 *)&entry->macaddr;
4292 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); 4603 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4326,10 +4637,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4326 kfree(ll_config); 4637 kfree(ll_config);
4327 return 0; 4638 return 0;
4328 4639
4329_exit5: 4640_exit6:
4330 for (i = 0; i < vdev->no_of_vpath; i++) 4641 for (i = 0; i < vdev->no_of_vpath; i++)
4331 vxge_free_mac_add_list(&vdev->vpaths[i]); 4642 vxge_free_mac_add_list(&vdev->vpaths[i]);
4332 4643_exit5:
4333 vxge_device_unregister(hldev); 4644 vxge_device_unregister(hldev);
4334_exit4: 4645_exit4:
4335 pci_disable_sriov(pdev); 4646 pci_disable_sriov(pdev);
@@ -4354,34 +4665,25 @@ _exit0:
4354 * Description: This function is called by the Pci subsystem to release a 4665 * Description: This function is called by the Pci subsystem to release a
4355 * PCI device and free up all resource held up by the device. 4666 * PCI device and free up all resource held up by the device.
4356 */ 4667 */
4357static void __devexit 4668static void __devexit vxge_remove(struct pci_dev *pdev)
4358vxge_remove(struct pci_dev *pdev)
4359{ 4669{
4360 struct __vxge_hw_device *hldev; 4670 struct __vxge_hw_device *hldev;
4361 struct vxgedev *vdev = NULL; 4671 struct vxgedev *vdev = NULL;
4362 struct net_device *dev; 4672 struct net_device *dev;
4363 int i = 0; 4673 int i = 0;
4364#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4365 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4366 u32 level_trace;
4367#endif
4368 4674
4369 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev); 4675 hldev = pci_get_drvdata(pdev);
4370 4676
4371 if (hldev == NULL) 4677 if (hldev == NULL)
4372 return; 4678 return;
4679
4373 dev = hldev->ndev; 4680 dev = hldev->ndev;
4374 vdev = netdev_priv(dev); 4681 vdev = netdev_priv(dev);
4375 4682
4376#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ 4683 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4377 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4378 level_trace = vdev->level_trace;
4379#endif
4380 vxge_debug_entryexit(level_trace,
4381 "%s:%d", __func__, __LINE__);
4382 4684
4383 vxge_debug_init(level_trace, 4685 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4384 "%s : removing PCI device...", __func__); 4686 __func__);
4385 vxge_device_unregister(hldev); 4687 vxge_device_unregister(hldev);
4386 4688
4387 for (i = 0; i < vdev->no_of_vpath; i++) { 4689 for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -4399,16 +4701,16 @@ vxge_remove(struct pci_dev *pdev)
4399 /* we are safe to free it now */ 4701 /* we are safe to free it now */
4400 free_netdev(dev); 4702 free_netdev(dev);
4401 4703
4402 vxge_debug_init(level_trace, 4704 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4403 "%s:%d Device unregistered", __func__, __LINE__); 4705 __func__, __LINE__);
4404 4706
4405 vxge_hw_device_terminate(hldev); 4707 vxge_hw_device_terminate(hldev);
4406 4708
4407 pci_disable_device(pdev); 4709 pci_disable_device(pdev);
4408 pci_release_regions(pdev); 4710 pci_release_regions(pdev);
4409 pci_set_drvdata(pdev, NULL); 4711 pci_set_drvdata(pdev, NULL);
4410 vxge_debug_entryexit(level_trace, 4712 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4411 "%s:%d Exiting...", __func__, __LINE__); 4713 __LINE__);
4412} 4714}
4413 4715
4414static struct pci_error_handlers vxge_err_handler = { 4716static struct pci_error_handlers vxge_err_handler = {
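
Note: the vxge-main.c hunks above add SIOCSHWTSTAMP handling (vxge_hwtstamp_ioctl) and propagate rx_hwts to each ring. A minimal userspace sketch of how that new ioctl path would typically be exercised follows; it uses only the standard linux/net_tstamp.h API, and the interface name "eth0" is a placeholder, not something taken from the patch.

	/* Hedged sketch: request RX hardware timestamps from an interface whose
	 * driver implements SIOCSHWTSTAMP, as vxge_ioctl() does after this patch.
	 * "eth0" is an assumed interface name.
	 */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>

	int main(void)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;

		memset(&cfg, 0, sizeof(cfg));	     /* flags must stay 0 */
		cfg.tx_type = HWTSTAMP_TX_OFF;	     /* vxge rejects HWTSTAMP_TX_ON */
		cfg.rx_filter = HWTSTAMP_FILTER_ALL; /* driver coerces PTP filters to ALL */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
			perror("SIOCSHWTSTAMP");
			close(fd);
			return 1;
		}

		/* On success the driver copies back the filter it actually enabled. */
		printf("rx_filter in effect: %d\n", cfg.rx_filter);
		close(fd);
		return 0;
	}
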
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index de64536cb7d..953cb0ded3e 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -29,6 +29,9 @@
29 29
30#define PCI_DEVICE_ID_TITAN_WIN 0x5733 30#define PCI_DEVICE_ID_TITAN_WIN 0x5733
31#define PCI_DEVICE_ID_TITAN_UNI 0x5833 31#define PCI_DEVICE_ID_TITAN_UNI 0x5833
32#define VXGE_HW_TITAN1_PCI_REVISION 1
33#define VXGE_HW_TITAN1A_PCI_REVISION 2
34
32#define VXGE_USE_DEFAULT 0xffffffff 35#define VXGE_USE_DEFAULT 0xffffffff
33#define VXGE_HW_VPATH_MSIX_ACTIVE 4 36#define VXGE_HW_VPATH_MSIX_ACTIVE 4
34#define VXGE_ALARM_MSIX_ID 2 37#define VXGE_ALARM_MSIX_ID 2
@@ -53,11 +56,13 @@
53 56
54#define VXGE_TTI_BTIMER_VAL 250000 57#define VXGE_TTI_BTIMER_VAL 250000
55 58
56#define VXGE_TTI_LTIMER_VAL 1000 59#define VXGE_TTI_LTIMER_VAL 1000
57#define VXGE_TTI_RTIMER_VAL 0 60#define VXGE_T1A_TTI_LTIMER_VAL 80
58#define VXGE_RTI_BTIMER_VAL 250 61#define VXGE_TTI_RTIMER_VAL 0
59#define VXGE_RTI_LTIMER_VAL 100 62#define VXGE_T1A_TTI_RTIMER_VAL 400
60#define VXGE_RTI_RTIMER_VAL 0 63#define VXGE_RTI_BTIMER_VAL 250
64#define VXGE_RTI_LTIMER_VAL 100
65#define VXGE_RTI_RTIMER_VAL 0
61#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH 66#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
62#define VXGE_ISR_POLLING_CNT 8 67#define VXGE_ISR_POLLING_CNT 8
63#define VXGE_MAX_CONFIG_DEV 0xFF 68#define VXGE_MAX_CONFIG_DEV 0xFF
@@ -76,14 +81,32 @@
76#define TTI_TX_UFC_B 40 81#define TTI_TX_UFC_B 40
77#define TTI_TX_UFC_C 60 82#define TTI_TX_UFC_C 60
78#define TTI_TX_UFC_D 100 83#define TTI_TX_UFC_D 100
84#define TTI_T1A_TX_UFC_A 30
85#define TTI_T1A_TX_UFC_B 80
86/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */
87/* Slope - 93 */
88/* 60 - 9k Mtu, 140 - 1.5k mtu */
89#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
90
91/* Slope - 37 */
92/* 100 - 9k Mtu, 300 - 1.5k mtu */
93#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
94
95
96#define RTI_RX_URANGE_A 5
97#define RTI_RX_URANGE_B 15
98#define RTI_RX_URANGE_C 40
99#define RTI_T1A_RX_URANGE_A 1
100#define RTI_T1A_RX_URANGE_B 20
101#define RTI_T1A_RX_URANGE_C 50
102#define RTI_RX_UFC_A 1
103#define RTI_RX_UFC_B 5
104#define RTI_RX_UFC_C 10
105#define RTI_RX_UFC_D 15
106#define RTI_T1A_RX_UFC_B 20
107#define RTI_T1A_RX_UFC_C 50
108#define RTI_T1A_RX_UFC_D 60
79 109
80#define RTI_RX_URANGE_A 5
81#define RTI_RX_URANGE_B 15
82#define RTI_RX_URANGE_C 40
83#define RTI_RX_UFC_A 1
84#define RTI_RX_UFC_B 5
85#define RTI_RX_UFC_C 10
86#define RTI_RX_UFC_D 15
87 110
88/* Milli secs timer period */ 111/* Milli secs timer period */
89#define VXGE_TIMER_DELAY 10000 112#define VXGE_TIMER_DELAY 10000
@@ -145,15 +168,15 @@ struct vxge_config {
145 168
146 int addr_learn_en; 169 int addr_learn_en;
147 170
148 int rth_steering; 171 u32 rth_steering:2,
149 int rth_algorithm; 172 rth_algorithm:2,
150 int rth_hash_type_tcpipv4; 173 rth_hash_type_tcpipv4:1,
151 int rth_hash_type_ipv4; 174 rth_hash_type_ipv4:1,
152 int rth_hash_type_tcpipv6; 175 rth_hash_type_tcpipv6:1,
153 int rth_hash_type_ipv6; 176 rth_hash_type_ipv6:1,
154 int rth_hash_type_tcpipv6ex; 177 rth_hash_type_tcpipv6ex:1,
155 int rth_hash_type_ipv6ex; 178 rth_hash_type_ipv6ex:1,
156 int rth_bkt_sz; 179 rth_bkt_sz:8;
157 int rth_jhash_golden_ratio; 180 int rth_jhash_golden_ratio;
158 int tx_steering_type; 181 int tx_steering_type;
159 int fifo_indicate_max_pkts; 182 int fifo_indicate_max_pkts;
@@ -248,8 +271,9 @@ struct vxge_ring {
248 */ 271 */
249 int driver_id; 272 int driver_id;
250 273
251 /* copy of the flag indicating whether rx_csum is to be used */ 274 /* copy of the flag indicating whether rx_csum is to be used */
252 u32 rx_csum; 275 u32 rx_csum:1,
276 rx_hwts:1;
253 277
254 int pkts_processed; 278 int pkts_processed;
255 int budget; 279 int budget;
@@ -327,7 +351,9 @@ struct vxgedev {
327 u16 all_multi_flg; 351 u16 all_multi_flg;
328 352
329 /* A flag indicating whether rx_csum is to be used or not. */ 353 /* A flag indicating whether rx_csum is to be used or not. */
330 u32 rx_csum; 354 u32 rx_csum:1,
355 rx_hwts:1,
356 titan1:1;
331 357
332 struct vxge_msix_entry *vxge_entries; 358 struct vxge_msix_entry *vxge_entries;
333 struct msix_entry *entries; 359 struct msix_entry *entries;
@@ -387,8 +413,6 @@ struct vxge_tx_priv {
387 static int p = val; \ 413 static int p = val; \
388 module_param(p, int, 0) 414 module_param(p, int, 0)
389 415
390#define vxge_os_bug(fmt...) { printk(fmt); BUG(); }
391
392#define vxge_os_timer(timer, handle, arg, exp) do { \ 416#define vxge_os_timer(timer, handle, arg, exp) do { \
393 init_timer(&timer); \ 417 init_timer(&timer); \
394 timer.function = handle; \ 418 timer.function = handle; \
@@ -397,6 +421,11 @@ struct vxge_tx_priv {
397 } while (0); 421 } while (0);
398 422
399extern void vxge_initialize_ethtool_ops(struct net_device *ndev); 423extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
424
425enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
426
427int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
428
400/** 429/**
401 * #define VXGE_DEBUG_INIT: debug for initialization functions 430 * #define VXGE_DEBUG_INIT: debug for initialization functions
402 * #define VXGE_DEBUG_TX : debug transmit related functions 431 * #define VXGE_DEBUG_TX : debug transmit related functions
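
Note: the vxge-main.h hunk above introduces MTU-scaled Titan-1A TX utilization thresholds, TTI_T1A_TX_UFC_C/D. A small sketch evaluating them at the two MTU endpoints quoted in the header comments is shown below; VXGE_HW_MAX_MTU is defined elsewhere in the driver, and 9000 is assumed here only because the "60 - 9k Mtu, 140 - 1.5k mtu" comment implies that endpoint.

	/* Hedged sketch: evaluate the T1A thresholds added in vxge-main.h.
	 * VXGE_HW_MAX_MTU = 9000 is an assumption, see the note above.
	 */
	#include <stdio.h>

	#define VXGE_HW_MAX_MTU		9000
	#define TTI_T1A_TX_UFC_C(mtu)	(60 + ((VXGE_HW_MAX_MTU - (mtu)) / 93))
	#define TTI_T1A_TX_UFC_D(mtu)	(100 + ((VXGE_HW_MAX_MTU - (mtu)) / 37))

	int main(void)
	{
		const int mtus[] = { 1500, 9000 };

		for (unsigned int i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
			printf("mtu %4d: ufc_c=%d ufc_d=%d\n", mtus[i],
			       TTI_T1A_TX_UFC_C(mtus[i]), TTI_T1A_TX_UFC_D(mtus[i]));
		/* Prints: mtu 1500 -> ufc_c=140, ufc_d=302 (close to the 140/300
		 * endpoints in the header comments); mtu 9000 -> 60, 100.
		 */
		return 0;
	}
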
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 3dd5c9615ef..3e658b17594 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -49,6 +49,33 @@
49#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17 49#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
50#define VXGE_HW_TITAN_VPATH_REG_SPACES 17 50#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
51 51
52#define VXGE_HW_FW_API_GET_EPROM_REV 31
53
54#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
55#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
56#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
57#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
58
59#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
60#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
61#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
62#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
63#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
64
65#define VXGE_HW_FW_API_GET_FUNC_MODE 29
66#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
67
68#define VXGE_HW_FW_UPGRADE_MEMO 13
69#define VXGE_HW_FW_UPGRADE_ACTION 16
70#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
71#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
72#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
73#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
74
75#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
76#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
77#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
78
52#define VXGE_HW_ASIC_MODE_RESERVED 0 79#define VXGE_HW_ASIC_MODE_RESERVED 0
53#define VXGE_HW_ASIC_MODE_NO_IOV 1 80#define VXGE_HW_ASIC_MODE_NO_IOV 1
54#define VXGE_HW_ASIC_MODE_SR_IOV 2 81#define VXGE_HW_ASIC_MODE_SR_IOV 2
@@ -165,13 +192,13 @@
165#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2 192#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
166#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3 193#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
167#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5 194#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
168#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 195#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
169#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7 196#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
170#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8 197#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
171#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9 198#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
172#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10 199#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
173#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11 200#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
174#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 201#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
175#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13 202#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
176 203
177#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \ 204#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
@@ -437,6 +464,7 @@
437#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \ 464#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
438 vxge_bVALn(bits, 48, 16) 465 vxge_bVALn(bits, 48, 16)
439#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16) 466#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16)
467#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
440 468
441#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\ 469#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
442 vxge_bVALn(bits, 0, 18) 470 vxge_bVALn(bits, 0, 18)
@@ -3998,6 +4026,7 @@ struct vxge_hw_vpath_reg {
3998#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9) 4026#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
3999#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9) 4027#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
4000#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9) 4028#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
4029#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
4001/*0x00a78*/ u64 prc_cfg7; 4030/*0x00a78*/ u64 prc_cfg7;
4002#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2) 4031#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
4003#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11) 4032#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 9890d4d596d..1fceee87622 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1904,34 +1904,6 @@ enum vxge_hw_ring_tcode {
1904 VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF 1904 VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
1905}; 1905};
1906 1906
1907/**
1908 * enum enum vxge_hw_ring_hash_type - RTH hash types
1909 * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
1910 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
1911 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
1912 * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
1913 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
1914 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
1915 * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
1916 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
1917 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
1918 * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
1919 *
1920 * RTH hash types
1921 */
1922enum vxge_hw_ring_hash_type {
1923 VXGE_HW_RING_HASH_TYPE_NONE = 0x0,
1924 VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1,
1925 VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2,
1926 VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3,
1927 VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4,
1928 VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5,
1929 VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6,
1930 VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7,
1931 VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8,
1932 VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9
1933};
1934
1935enum vxge_hw_status vxge_hw_ring_rxd_reserve( 1907enum vxge_hw_status vxge_hw_ring_rxd_reserve(
1936 struct __vxge_hw_ring *ring_handle, 1908 struct __vxge_hw_ring *ring_handle,
1937 void **rxdh); 1909 void **rxdh);
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 53fefe13736..9c93e0a1717 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,7 +16,34 @@
16 16
17#define VXGE_VERSION_MAJOR "2" 17#define VXGE_VERSION_MAJOR "2"
18#define VXGE_VERSION_MINOR "0" 18#define VXGE_VERSION_MINOR "0"
19#define VXGE_VERSION_FIX "9" 19#define VXGE_VERSION_FIX "11"
20#define VXGE_VERSION_BUILD "20840" 20#define VXGE_VERSION_BUILD "21932"
21#define VXGE_VERSION_FOR "k" 21#define VXGE_VERSION_FOR "k"
22
23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
24
25#define VXGE_DEAD_FW_VER_MAJOR 1
26#define VXGE_DEAD_FW_VER_MINOR 4
27#define VXGE_DEAD_FW_VER_BUILD 4
28
29#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
30 VXGE_DEAD_FW_VER_MINOR, \
31 VXGE_DEAD_FW_VER_BUILD)
32
33#define VXGE_EPROM_FW_VER_MAJOR 1
34#define VXGE_EPROM_FW_VER_MINOR 6
35#define VXGE_EPROM_FW_VER_BUILD 1
36
37#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
38 VXGE_EPROM_FW_VER_MINOR, \
39 VXGE_EPROM_FW_VER_BUILD)
40
41#define VXGE_CERT_FW_VER_MAJOR 1
42#define VXGE_CERT_FW_VER_MINOR 8
43#define VXGE_CERT_FW_VER_BUILD 1
44
45#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
46 VXGE_CERT_FW_VER_MINOR, \
47 VXGE_CERT_FW_VER_BUILD)
48
22#endif 49#endif
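The new VXGE_FW_VER() macro packs major/minor/build numbers into one integer so firmware versions can be compared numerically against the dead, EPROM and certified thresholds defined above. A minimal user-space sketch of how such packed versions compare; the check_firmware() helper and its messages are illustrative only, not part of the driver:

    /* Standalone sketch of the packed-version comparison; helper name and
     * output strings are invented for the example. */
    #include <stdio.h>

    #define VXGE_FW_VER(maj, min, bld)  (((maj) << 16) + ((min) << 8) + (bld))

    #define VXGE_FW_DEAD_VER   VXGE_FW_VER(1, 4, 4)   /* known-bad firmware */
    #define VXGE_CERT_FW_VER   VXGE_FW_VER(1, 8, 1)   /* certified firmware */

    static const char *check_firmware(int maj, int min, int bld)
    {
    	int ver = VXGE_FW_VER(maj, min, bld);

    	if (ver == VXGE_FW_DEAD_VER)
    		return "dead firmware, needs recovery";
    	if (ver < VXGE_CERT_FW_VER)
    		return "older than certified, consider upgrading";
    	return "certified or newer";
    }

    int main(void)
    {
    	printf("1.4.4 -> %s\n", check_firmware(1, 4, 4));
    	printf("1.8.1 -> %s\n", check_firmware(1, 8, 1));
    	return 0;
    }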
diff --git a/drivers/net/wireless/ath/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c
index 4604de09a8b..6452c5055a6 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.c
+++ b/drivers/net/wireless/ath/ar9170/cmd.c
@@ -54,7 +54,7 @@ int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len)
54 54
55int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val) 55int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
56{ 56{
57 __le32 buf[2] = { 57 const __le32 buf[2] = {
58 cpu_to_le32(reg), 58 cpu_to_le32(reg),
59 cpu_to_le32(val), 59 cpu_to_le32(val),
60 }; 60 };
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 501050c0296..20ea68c59f7 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -104,6 +104,11 @@ enum ath_cipher {
104 ATH_CIPHER_MIC = 127 104 ATH_CIPHER_MIC = 127
105}; 105};
106 106
107enum ath_drv_info {
108 AR7010_DEVICE = BIT(0),
109 AR9287_DEVICE = BIT(1),
110};
111
107/** 112/**
108 * struct ath_ops - Register read/write operations 113 * struct ath_ops - Register read/write operations
109 * 114 *
@@ -147,6 +152,7 @@ struct ath_common {
147 u8 rx_chainmask; 152 u8 rx_chainmask;
148 153
149 u32 rx_bufsize; 154 u32 rx_bufsize;
155 u32 driver_info;
150 156
151 u32 keymax; 157 u32 keymax;
152 DECLARE_BITMAP(keymap, ATH_KEYMAX); 158 DECLARE_BITMAP(keymap, ATH_KEYMAX);
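The ath.h hunk above introduces an ath_drv_info flag enum built with BIT() plus a driver_info word in struct ath_common. A small standalone sketch of that bit-flag pattern, assuming only what the hunk shows; BIT() is redefined for user space and the probe/branch logic is invented:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1UL << (n))

    enum ath_drv_info {
    	AR7010_DEVICE = BIT(0),
    	AR9287_DEVICE = BIT(1),
    };

    int main(void)
    {
    	uint32_t driver_info = 0;

    	/* a probe path might record which device variant was detected */
    	driver_info |= AR7010_DEVICE;

    	/* later code branches on the recorded flags */
    	if (driver_info & AR7010_DEVICE)
    		printf("AR7010-specific setup\n");
    	if (!(driver_info & AR9287_DEVICE))
    		printf("not an AR9287\n");
    	return 0;
    }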
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index eb83b7b4d0e..47844575caa 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -4,6 +4,7 @@ config ATH5K
4 select MAC80211_LEDS 4 select MAC80211_LEDS
5 select LEDS_CLASS 5 select LEDS_CLASS
6 select NEW_LEDS 6 select NEW_LEDS
7 select AVERAGE
7 ---help--- 8 ---help---
8 This module adds support for wireless adapters based on 9 This module adds support for wireless adapters based on
9 Atheros 5xxx chipset. 10 Atheros 5xxx chipset.
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index f1419198a47..6b75b22a929 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -63,15 +63,15 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
63 * so i stick with the HAL version for now... 63 * so i stick with the HAL version for now...
64 */ 64 */
65#if 0 65#if 0
66 const s8 hi[] = { -18, -18, -16, -14, -12 }; 66 static const s8 hi[] = { -18, -18, -16, -14, -12 };
67 const s8 lo[] = { -52, -56, -60, -64, -70 }; 67 static const s8 lo[] = { -52, -56, -60, -64, -70 };
68 const s8 sz[] = { -34, -41, -48, -55, -62 }; 68 static const s8 sz[] = { -34, -41, -48, -55, -62 };
69 const s8 fr[] = { -70, -72, -75, -78, -80 }; 69 static const s8 fr[] = { -70, -72, -75, -78, -80 };
70#else 70#else
71 const s8 sz[] = { -55, -62 }; 71 static const s8 sz[] = { -55, -62 };
72 const s8 lo[] = { -64, -70 }; 72 static const s8 lo[] = { -64, -70 };
73 const s8 hi[] = { -14, -12 }; 73 static const s8 hi[] = { -14, -12 };
74 const s8 fr[] = { -78, -80 }; 74 static const s8 fr[] = { -78, -80 };
75#endif 75#endif
76 if (level < 0 || level >= ARRAY_SIZE(sz)) { 76 if (level < 0 || level >= ARRAY_SIZE(sz)) {
 77 ATH5K_ERR(ah->ah_sc, "noise immunity level %d out of range", 77 ATH5K_ERR(ah->ah_sc, "noise immunity level %d out of range",
@@ -102,7 +102,7 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
102void 102void
103ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level) 103ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
104{ 104{
105 const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 }; 105 static const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
106 106
107 if (level < 0 || level >= ARRAY_SIZE(val) || 107 if (level < 0 || level >= ARRAY_SIZE(val) ||
108 level > ah->ah_sc->ani_state.max_spur_level) { 108 level > ah->ah_sc->ani_state.max_spur_level) {
@@ -127,7 +127,7 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
127void 127void
128ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level) 128ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
129{ 129{
130 const int val[] = { 0, 4, 8 }; 130 static const int val[] = { 0, 4, 8 };
131 131
132 if (level < 0 || level >= ARRAY_SIZE(val)) { 132 if (level < 0 || level >= ARRAY_SIZE(val)) {
133 ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level); 133 ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level);
@@ -151,12 +151,12 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
151void 151void
152ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on) 152ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
153{ 153{
154 const int m1l[] = { 127, 50 }; 154 static const int m1l[] = { 127, 50 };
155 const int m2l[] = { 127, 40 }; 155 static const int m2l[] = { 127, 40 };
156 const int m1[] = { 127, 0x4d }; 156 static const int m1[] = { 127, 0x4d };
157 const int m2[] = { 127, 0x40 }; 157 static const int m2[] = { 127, 0x40 };
158 const int m2cnt[] = { 31, 16 }; 158 static const int m2cnt[] = { 31, 16 };
159 const int m2lcnt[] = { 63, 48 }; 159 static const int m2lcnt[] = { 63, 48 };
160 160
161 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR, 161 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
162 AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]); 162 AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
@@ -192,7 +192,7 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
192void 192void
193ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on) 193ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
194{ 194{
195 const int val[] = { 8, 6 }; 195 static const int val[] = { 8, 6 };
196 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR, 196 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
197 AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]); 197 AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
198 ah->ah_sc->ani_state.cck_weak_sig = on; 198 ah->ah_sc->ani_state.cck_weak_sig = on;
@@ -216,7 +216,7 @@ static void
216ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as, 216ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
217 bool ofdm_trigger) 217 bool ofdm_trigger)
218{ 218{
219 int rssi = ah->ah_beacon_rssi_avg.avg; 219 int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
220 220
221 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)", 221 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
 222 ofdm_trigger ? "OFDM" : "CCK"); 222 ofdm_trigger ? "OFDM" : "CCK");
@@ -301,7 +301,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
301static void 301static void
302ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as) 302ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
303{ 303{
304 int rssi = ah->ah_beacon_rssi_avg.avg; 304 int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
305 305
306 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity"); 306 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
307 307
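The ani.c changes above turn the per-call ANI lookup tables into static const data. A brief sketch of the difference, under the assumption that the point is avoiding a per-call stack copy; the table values below are illustrative, not the driver's:

    #include <stdio.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static int lookup_stack(int level)
    {
    	const int val[] = { 0, 4, 8 };        /* rebuilt on the stack each call */

    	return (level >= 0 && level < (int)ARRAY_SIZE(val)) ? val[level] : -1;
    }

    static int lookup_rodata(int level)
    {
    	static const int val[] = { 0, 4, 8 }; /* emitted once in read-only data */

    	return (level >= 0 && level < (int)ARRAY_SIZE(val)) ? val[level] : -1;
    }

    int main(void)
    {
    	printf("%d %d\n", lookup_stack(2), lookup_rodata(2));
    	return 0;
    }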
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 308b79e1ff0..2718136e488 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -25,6 +25,7 @@
25 25
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/average.h>
28#include <net/mac80211.h> 29#include <net/mac80211.h>
29 30
30/* RX/TX descriptor hw structs 31/* RX/TX descriptor hw structs
@@ -1102,7 +1103,7 @@ struct ath5k_hw {
1102 struct ath5k_nfcal_hist ah_nfcal_hist; 1103 struct ath5k_nfcal_hist ah_nfcal_hist;
1103 1104
1104 /* average beacon RSSI in our BSS (used by ANI) */ 1105 /* average beacon RSSI in our BSS (used by ANI) */
1105 struct ath5k_avg_val ah_beacon_rssi_avg; 1106 struct ewma ah_beacon_rssi_avg;
1106 1107
1107 /* noise floor from last periodic calibration */ 1108 /* noise floor from last periodic calibration */
1108 s32 ah_noise_floor; 1109 s32 ah_noise_floor;
@@ -1315,27 +1316,4 @@ static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
1315 return retval; 1316 return retval;
1316} 1317}
1317 1318
1318#define AVG_SAMPLES 8
1319#define AVG_FACTOR 1000
1320
1321/**
1322 * ath5k_moving_average - Exponentially weighted moving average
1323 * @avg: average structure
1324 * @val: current value
1325 *
1326 * This implementation make use of a struct ath5k_avg_val to prevent rounding
1327 * errors.
1328 */
1329static inline struct ath5k_avg_val
1330ath5k_moving_average(const struct ath5k_avg_val avg, const int val)
1331{
1332 struct ath5k_avg_val new;
1333 new.avg_weight = avg.avg_weight ?
1334 (((avg.avg_weight * ((AVG_SAMPLES) - 1)) +
1335 (val * (AVG_FACTOR))) / (AVG_SAMPLES)) :
1336 (val * (AVG_FACTOR));
1337 new.avg = new.avg_weight / (AVG_FACTOR);
1338 return new;
1339}
1340
1341#endif 1319#endif
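The removed ath5k_moving_average() is replaced by the generic EWMA helpers from linux/average.h, initialised later with ewma_init(&ah->ah_beacon_rssi_avg, 1000, 8). A user-space sketch of the same weighted-average arithmetic with those factor/weight values; it uses local names rather than the kernel API, and the sample RSSI values are made up:

    #include <stdio.h>

    struct avg {
    	unsigned long internal;   /* scaled by factor to limit rounding error */
    	unsigned long factor;
    	unsigned long weight;
    };

    static void avg_init(struct avg *a, unsigned long factor, unsigned long weight)
    {
    	a->internal = 0;
    	a->factor = factor;
    	a->weight = weight;
    }

    static void avg_add(struct avg *a, unsigned long val)
    {
    	a->internal = a->internal ?
    		(a->internal * (a->weight - 1) + val * a->factor) / a->weight :
    		val * a->factor;
    }

    static unsigned long avg_read(const struct avg *a)
    {
    	return a->internal / a->factor;
    }

    int main(void)
    {
    	static const unsigned long samples[] = { 40, 42, 38, 41, 39 };
    	struct avg rssi;
    	unsigned int i;

    	avg_init(&rssi, 1000, 8);
    	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
    		avg_add(&rssi, samples[i]);
    	printf("beacon RSSI average: %lu\n", avg_read(&rssi));
    	return 0;
    }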
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 8251946842e..7f783d9462a 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -549,7 +549,7 @@ static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
549 /* Calculate combined mode - when APs are active, operate in AP mode. 549 /* Calculate combined mode - when APs are active, operate in AP mode.
550 * Otherwise use the mode of the new interface. This can currently 550 * Otherwise use the mode of the new interface. This can currently
551 * only deal with combinations of APs and STAs. Only one ad-hoc 551 * only deal with combinations of APs and STAs. Only one ad-hoc
552 * interfaces is allowed above. 552 * interfaces is allowed.
553 */ 553 */
554 if (avf->opmode == NL80211_IFTYPE_AP) 554 if (avf->opmode == NL80211_IFTYPE_AP)
555 iter_data->opmode = NL80211_IFTYPE_AP; 555 iter_data->opmode = NL80211_IFTYPE_AP;
@@ -558,16 +558,8 @@ static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
558 iter_data->opmode = avf->opmode; 558 iter_data->opmode = avf->opmode;
559} 559}
560 560
561static void ath_do_set_opmode(struct ath5k_softc *sc) 561static void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
562{ 562 struct ieee80211_vif *vif)
563 struct ath5k_hw *ah = sc->ah;
564 ath5k_hw_set_opmode(ah, sc->opmode);
565 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
566 sc->opmode, ath_opmode_to_string(sc->opmode));
567}
568
569void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
570 struct ieee80211_vif *vif)
571{ 563{
572 struct ath_common *common = ath5k_hw_common(sc->ah); 564 struct ath_common *common = ath5k_hw_common(sc->ah);
573 struct ath_vif_iter_data iter_data; 565 struct ath_vif_iter_data iter_data;
@@ -595,7 +587,9 @@ void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
595 /* Nothing active, default to station mode */ 587 /* Nothing active, default to station mode */
596 sc->opmode = NL80211_IFTYPE_STATION; 588 sc->opmode = NL80211_IFTYPE_STATION;
597 589
598 ath_do_set_opmode(sc); 590 ath5k_hw_set_opmode(sc->ah, sc->opmode);
591 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
592 sc->opmode, ath_opmode_to_string(sc->opmode));
599 593
600 if (iter_data.need_set_hw_addr && iter_data.found_active) 594 if (iter_data.need_set_hw_addr && iter_data.found_active)
601 ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac); 595 ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);
@@ -1307,8 +1301,7 @@ ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
1307 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0) 1301 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
1308 return; 1302 return;
1309 1303
1310 ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg, 1304 ewma_add(&ah->ah_beacon_rssi_avg, rssi);
1311 rssi);
1312 1305
1313 /* in IBSS mode we should keep RSSI statistics per neighbour */ 1306 /* in IBSS mode we should keep RSSI statistics per neighbour */
1314 /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */ 1307 /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
@@ -2562,6 +2555,7 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
2562 ah->ah_cal_next_full = jiffies; 2555 ah->ah_cal_next_full = jiffies;
2563 ah->ah_cal_next_ani = jiffies; 2556 ah->ah_cal_next_ani = jiffies;
2564 ah->ah_cal_next_nf = jiffies; 2557 ah->ah_cal_next_nf = jiffies;
2558 ewma_init(&ah->ah_beacon_rssi_avg, 1000, 8);
2565 2559
2566 /* 2560 /*
2567 * Change channels and update the h/w rate map if we're switching; 2561 * Change channels and update the h/w rate map if we're switching;
@@ -3206,14 +3200,32 @@ static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
3206{ 3200{
3207 struct ath5k_softc *sc = hw->priv; 3201 struct ath5k_softc *sc = hw->priv;
3208 struct ieee80211_conf *conf = &hw->conf; 3202 struct ieee80211_conf *conf = &hw->conf;
3203 struct ath_common *common = ath5k_hw_common(sc->ah);
3204 struct ath_cycle_counters *cc = &common->cc_survey;
3205 unsigned int div = common->clockrate * 1000;
3209 3206
3210 if (idx != 0) 3207 if (idx != 0)
3211 return -ENOENT; 3208 return -ENOENT;
3212 3209
3213 survey->channel = conf->channel; 3210 survey->channel = conf->channel;
3214 survey->filled = SURVEY_INFO_NOISE_DBM; 3211 survey->filled = SURVEY_INFO_NOISE_DBM;
3215 survey->noise = sc->ah->ah_noise_floor; 3212 survey->noise = sc->ah->ah_noise_floor;
3216 3213
3214 spin_lock_bh(&common->cc_lock);
3215 ath_hw_cycle_counters_update(common);
3216 if (cc->cycles > 0) {
3217 survey->filled |= SURVEY_INFO_CHANNEL_TIME |
3218 SURVEY_INFO_CHANNEL_TIME_BUSY |
3219 SURVEY_INFO_CHANNEL_TIME_RX |
3220 SURVEY_INFO_CHANNEL_TIME_TX;
3221 survey->channel_time += cc->cycles / div;
3222 survey->channel_time_busy += cc->rx_busy / div;
3223 survey->channel_time_rx += cc->rx_frame / div;
3224 survey->channel_time_tx += cc->tx_frame / div;
3225 }
3226 memset(cc, 0, sizeof(*cc));
3227 spin_unlock_bh(&common->cc_lock);
3228
3217 return 0; 3229 return 0;
3218} 3230}
3219 3231
@@ -3395,6 +3407,36 @@ static int ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
3395 return ret; 3407 return ret;
3396} 3408}
3397 3409
3410static int ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
3411{
3412 struct ath5k_softc *sc = hw->priv;
3413
3414 if (tx_ant == 1 && rx_ant == 1)
3415 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
3416 else if (tx_ant == 2 && rx_ant == 2)
3417 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
3418 else if ((tx_ant & 3) == 3 && (rx_ant & 3) == 3)
3419 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
3420 else
3421 return -EINVAL;
3422 return 0;
3423}
3424
3425static int ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
3426{
3427 struct ath5k_softc *sc = hw->priv;
3428
3429 switch (sc->ah->ah_ant_mode) {
3430 case AR5K_ANTMODE_FIXED_A:
3431 *tx_ant = 1; *rx_ant = 1; break;
3432 case AR5K_ANTMODE_FIXED_B:
3433 *tx_ant = 2; *rx_ant = 2; break;
3434 case AR5K_ANTMODE_DEFAULT:
3435 *tx_ant = 3; *rx_ant = 3; break;
3436 }
3437 return 0;
3438}
3439
3398static const struct ieee80211_ops ath5k_hw_ops = { 3440static const struct ieee80211_ops ath5k_hw_ops = {
3399 .tx = ath5k_tx, 3441 .tx = ath5k_tx,
3400 .start = ath5k_start, 3442 .start = ath5k_start,
@@ -3415,6 +3457,8 @@ static const struct ieee80211_ops ath5k_hw_ops = {
3415 .sw_scan_start = ath5k_sw_scan_start, 3457 .sw_scan_start = ath5k_sw_scan_start,
3416 .sw_scan_complete = ath5k_sw_scan_complete, 3458 .sw_scan_complete = ath5k_sw_scan_complete,
3417 .set_coverage_class = ath5k_set_coverage_class, 3459 .set_coverage_class = ath5k_set_coverage_class,
3460 .set_antenna = ath5k_set_antenna,
3461 .get_antenna = ath5k_get_antenna,
3418}; 3462};
3419 3463
3420/********************\ 3464/********************\
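The ath5k_get_survey() hunk above converts raw cycle counters into channel-time statistics by dividing by clockrate * 1000. A worked example of that arithmetic, assuming common->clockrate is the MAC clock in MHz (so the division yields milliseconds); all counter values are invented for illustration:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int clockrate = 44;          /* MHz, illustrative 2 GHz value */
    	unsigned int div = clockrate * 1000;  /* cycles per millisecond */

    	unsigned long cycles   = 440000000UL; /* ~10 s of counted cycles */
    	unsigned long rx_busy  =  88000000UL;
    	unsigned long rx_frame =  44000000UL;
    	unsigned long tx_frame =  22000000UL;

    	printf("channel_time      %lu ms\n", cycles / div);    /* 10000 */
    	printf("channel_time_busy %lu ms\n", rx_busy / div);   /*  2000 */
    	printf("channel_time_rx   %lu ms\n", rx_frame / div);  /*  1000 */
    	printf("channel_time_tx   %lu ms\n", tx_frame / div);  /*   500 */
    	return 0;
    }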
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index acda56ee521..7d785cb60ce 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -554,63 +554,63 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
554 554
555 len += snprintf(buf+len, sizeof(buf)-len, 555 len += snprintf(buf+len, sizeof(buf)-len,
556 "RX\n---------------------\n"); 556 "RX\n---------------------\n");
557 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%d\t(%d%%)\n", 557 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
558 st->rxerr_crc, 558 st->rxerr_crc,
559 st->rx_all_count > 0 ? 559 st->rx_all_count > 0 ?
560 st->rxerr_crc*100/st->rx_all_count : 0); 560 st->rxerr_crc*100/st->rx_all_count : 0);
561 len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%d\t(%d%%)\n", 561 len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%u\t(%u%%)\n",
562 st->rxerr_phy, 562 st->rxerr_phy,
563 st->rx_all_count > 0 ? 563 st->rx_all_count > 0 ?
564 st->rxerr_phy*100/st->rx_all_count : 0); 564 st->rxerr_phy*100/st->rx_all_count : 0);
565 for (i = 0; i < 32; i++) { 565 for (i = 0; i < 32; i++) {
566 if (st->rxerr_phy_code[i]) 566 if (st->rxerr_phy_code[i])
567 len += snprintf(buf+len, sizeof(buf)-len, 567 len += snprintf(buf+len, sizeof(buf)-len,
568 " phy_err[%d]\t%d\n", 568 " phy_err[%u]\t%u\n",
569 i, st->rxerr_phy_code[i]); 569 i, st->rxerr_phy_code[i]);
570 } 570 }
571 571
572 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n", 572 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%u\t(%u%%)\n",
573 st->rxerr_fifo, 573 st->rxerr_fifo,
574 st->rx_all_count > 0 ? 574 st->rx_all_count > 0 ?
575 st->rxerr_fifo*100/st->rx_all_count : 0); 575 st->rxerr_fifo*100/st->rx_all_count : 0);
576 len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%d\t(%d%%)\n", 576 len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%u\t(%u%%)\n",
577 st->rxerr_decrypt, 577 st->rxerr_decrypt,
578 st->rx_all_count > 0 ? 578 st->rx_all_count > 0 ?
579 st->rxerr_decrypt*100/st->rx_all_count : 0); 579 st->rxerr_decrypt*100/st->rx_all_count : 0);
580 len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%d\t(%d%%)\n", 580 len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%u\t(%u%%)\n",
581 st->rxerr_mic, 581 st->rxerr_mic,
582 st->rx_all_count > 0 ? 582 st->rx_all_count > 0 ?
583 st->rxerr_mic*100/st->rx_all_count : 0); 583 st->rxerr_mic*100/st->rx_all_count : 0);
584 len += snprintf(buf+len, sizeof(buf)-len, "process\t%d\t(%d%%)\n", 584 len += snprintf(buf+len, sizeof(buf)-len, "process\t%u\t(%u%%)\n",
585 st->rxerr_proc, 585 st->rxerr_proc,
586 st->rx_all_count > 0 ? 586 st->rx_all_count > 0 ?
587 st->rxerr_proc*100/st->rx_all_count : 0); 587 st->rxerr_proc*100/st->rx_all_count : 0);
588 len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%d\t(%d%%)\n", 588 len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%u\t(%u%%)\n",
589 st->rxerr_jumbo, 589 st->rxerr_jumbo,
590 st->rx_all_count > 0 ? 590 st->rx_all_count > 0 ?
591 st->rxerr_jumbo*100/st->rx_all_count : 0); 591 st->rxerr_jumbo*100/st->rx_all_count : 0);
592 len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n", 592 len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%u]\n",
593 st->rx_all_count); 593 st->rx_all_count);
594 len += snprintf(buf+len, sizeof(buf)-len, "RX-all-bytes\t%d\n", 594 len += snprintf(buf+len, sizeof(buf)-len, "RX-all-bytes\t%u\n",
595 st->rx_bytes_count); 595 st->rx_bytes_count);
596 596
597 len += snprintf(buf+len, sizeof(buf)-len, 597 len += snprintf(buf+len, sizeof(buf)-len,
598 "\nTX\n---------------------\n"); 598 "\nTX\n---------------------\n");
599 len += snprintf(buf+len, sizeof(buf)-len, "retry\t%d\t(%d%%)\n", 599 len += snprintf(buf+len, sizeof(buf)-len, "retry\t%u\t(%u%%)\n",
600 st->txerr_retry, 600 st->txerr_retry,
601 st->tx_all_count > 0 ? 601 st->tx_all_count > 0 ?
602 st->txerr_retry*100/st->tx_all_count : 0); 602 st->txerr_retry*100/st->tx_all_count : 0);
603 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n", 603 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%u\t(%u%%)\n",
604 st->txerr_fifo, 604 st->txerr_fifo,
605 st->tx_all_count > 0 ? 605 st->tx_all_count > 0 ?
606 st->txerr_fifo*100/st->tx_all_count : 0); 606 st->txerr_fifo*100/st->tx_all_count : 0);
607 len += snprintf(buf+len, sizeof(buf)-len, "filter\t%d\t(%d%%)\n", 607 len += snprintf(buf+len, sizeof(buf)-len, "filter\t%u\t(%u%%)\n",
608 st->txerr_filt, 608 st->txerr_filt,
609 st->tx_all_count > 0 ? 609 st->tx_all_count > 0 ?
610 st->txerr_filt*100/st->tx_all_count : 0); 610 st->txerr_filt*100/st->tx_all_count : 0);
611 len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n", 611 len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%u]\n",
612 st->tx_all_count); 612 st->tx_all_count);
613 len += snprintf(buf+len, sizeof(buf)-len, "TX-all-bytes\t%d\n", 613 len += snprintf(buf+len, sizeof(buf)-len, "TX-all-bytes\t%u\n",
614 st->tx_bytes_count); 614 st->tx_bytes_count);
615 615
616 if (len > sizeof(buf)) 616 if (len > sizeof(buf))
@@ -719,7 +719,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
719 st->mib_intr); 719 st->mib_intr);
720 len += snprintf(buf+len, sizeof(buf)-len, 720 len += snprintf(buf+len, sizeof(buf)-len,
721 "beacon RSSI average:\t%d\n", 721 "beacon RSSI average:\t%d\n",
722 sc->ah->ah_beacon_rssi_avg.avg); 722 (int)ewma_read(&sc->ah->ah_beacon_rssi_avg));
723 723
724#define CC_PRINT(_struct, _field) \ 724#define CC_PRINT(_struct, _field) \
725 _struct._field, \ 725 _struct._field, \

diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index b2adb2a281c..2509d0bf037 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -26,7 +26,7 @@
26struct ath5k_hw_rx_ctl { 26struct ath5k_hw_rx_ctl {
27 u32 rx_control_0; /* RX control word 0 */ 27 u32 rx_control_0; /* RX control word 0 */
28 u32 rx_control_1; /* RX control word 1 */ 28 u32 rx_control_1; /* RX control word 1 */
29} __packed; 29} __packed __aligned(4);
30 30
31/* RX control word 1 fields/flags */ 31/* RX control word 1 fields/flags */
32#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */ 32#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
@@ -39,7 +39,7 @@ struct ath5k_hw_rx_ctl {
39struct ath5k_hw_rx_status { 39struct ath5k_hw_rx_status {
40 u32 rx_status_0; /* RX status word 0 */ 40 u32 rx_status_0; /* RX status word 0 */
41 u32 rx_status_1; /* RX status word 1 */ 41 u32 rx_status_1; /* RX status word 1 */
42} __packed; 42} __packed __aligned(4);
43 43
44/* 5210/5211 */ 44/* 5210/5211 */
45/* RX status word 0 fields/flags */ 45/* RX status word 0 fields/flags */
@@ -129,7 +129,7 @@ enum ath5k_phy_error_code {
129struct ath5k_hw_2w_tx_ctl { 129struct ath5k_hw_2w_tx_ctl {
130 u32 tx_control_0; /* TX control word 0 */ 130 u32 tx_control_0; /* TX control word 0 */
131 u32 tx_control_1; /* TX control word 1 */ 131 u32 tx_control_1; /* TX control word 1 */
132} __packed; 132} __packed __aligned(4);
133 133
134/* TX control word 0 fields/flags */ 134/* TX control word 0 fields/flags */
135#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */ 135#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
@@ -185,7 +185,7 @@ struct ath5k_hw_4w_tx_ctl {
185 u32 tx_control_1; /* TX control word 1 */ 185 u32 tx_control_1; /* TX control word 1 */
186 u32 tx_control_2; /* TX control word 2 */ 186 u32 tx_control_2; /* TX control word 2 */
187 u32 tx_control_3; /* TX control word 3 */ 187 u32 tx_control_3; /* TX control word 3 */
188} __packed; 188} __packed __aligned(4);
189 189
190/* TX control word 0 fields/flags */ 190/* TX control word 0 fields/flags */
191#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */ 191#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
@@ -244,7 +244,7 @@ struct ath5k_hw_4w_tx_ctl {
244struct ath5k_hw_tx_status { 244struct ath5k_hw_tx_status {
245 u32 tx_status_0; /* TX status word 0 */ 245 u32 tx_status_0; /* TX status word 0 */
246 u32 tx_status_1; /* TX status word 1 */ 246 u32 tx_status_1; /* TX status word 1 */
247} __packed; 247} __packed __aligned(4);
248 248
249/* TX status word 0 fields/flags */ 249/* TX status word 0 fields/flags */
250#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 /* TX success */ 250#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 /* TX success */
@@ -282,7 +282,7 @@ struct ath5k_hw_tx_status {
282struct ath5k_hw_5210_tx_desc { 282struct ath5k_hw_5210_tx_desc {
283 struct ath5k_hw_2w_tx_ctl tx_ctl; 283 struct ath5k_hw_2w_tx_ctl tx_ctl;
284 struct ath5k_hw_tx_status tx_stat; 284 struct ath5k_hw_tx_status tx_stat;
285} __packed; 285} __packed __aligned(4);
286 286
287/* 287/*
288 * 5212 hardware TX descriptor 288 * 5212 hardware TX descriptor
@@ -290,7 +290,7 @@ struct ath5k_hw_5210_tx_desc {
290struct ath5k_hw_5212_tx_desc { 290struct ath5k_hw_5212_tx_desc {
291 struct ath5k_hw_4w_tx_ctl tx_ctl; 291 struct ath5k_hw_4w_tx_ctl tx_ctl;
292 struct ath5k_hw_tx_status tx_stat; 292 struct ath5k_hw_tx_status tx_stat;
293} __packed; 293} __packed __aligned(4);
294 294
295/* 295/*
296 * Common hardware RX descriptor 296 * Common hardware RX descriptor
@@ -298,7 +298,7 @@ struct ath5k_hw_5212_tx_desc {
298struct ath5k_hw_all_rx_desc { 298struct ath5k_hw_all_rx_desc {
299 struct ath5k_hw_rx_ctl rx_ctl; 299 struct ath5k_hw_rx_ctl rx_ctl;
300 struct ath5k_hw_rx_status rx_stat; 300 struct ath5k_hw_rx_status rx_stat;
301} __packed; 301} __packed __aligned(4);
302 302
303/* 303/*
304 * Atheros hardware DMA descriptor 304 * Atheros hardware DMA descriptor
@@ -313,7 +313,7 @@ struct ath5k_desc {
313 struct ath5k_hw_5212_tx_desc ds_tx5212; 313 struct ath5k_hw_5212_tx_desc ds_tx5212;
314 struct ath5k_hw_all_rx_desc ds_rx; 314 struct ath5k_hw_all_rx_desc ds_rx;
315 } ud; 315 } ud;
316} __packed; 316} __packed __aligned(4);
317 317
318#define AR5K_RXDESC_INTREQ 0x0020 318#define AR5K_RXDESC_INTREQ 0x0020
319 319
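The desc.h hunk adds __aligned(4) to every __packed descriptor struct: packed alone makes the compiler assume byte alignment, which can force byte-wise accesses on strict-alignment CPUs, while the added attribute records that the DMA descriptors really are word aligned. A sketch of the combination with the underlying GCC attributes; the struct is a stand-in, not an ath5k descriptor:

    #include <stdio.h>
    #include <stdint.h>

    #define __packed     __attribute__((packed))
    #define __aligned(x) __attribute__((aligned(x)))

    struct desc_words {
    	uint32_t word0;   /* e.g. a control word */
    	uint32_t word1;   /* e.g. a status word  */
    } __packed __aligned(4);

    int main(void)
    {
    	struct desc_words d = { 0x12345678, 0x9abcdef0 };

    	printf("sizeof=%zu alignof=%zu word0=%08x\n",
    	       sizeof(d), __alignof__(d), d.word0);
    	return 0;
    }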
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 219367884e6..6b43f535ff5 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1102,18 +1102,12 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1102 PHY calibration 1102 PHY calibration
1103\*****************/ 1103\*****************/
1104 1104
1105static int sign_extend(int val, const int nbits)
1106{
1107 int order = BIT(nbits-1);
1108 return (val ^ order) - order;
1109}
1110
1111static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah) 1105static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
1112{ 1106{
1113 s32 val; 1107 s32 val;
1114 1108
1115 val = ath5k_hw_reg_read(ah, AR5K_PHY_NF); 1109 val = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
1116 return sign_extend(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 9); 1110 return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
1117} 1111}
1118 1112
1119void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah) 1113void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
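Note the argument change in the phy.c hunk: the removed local helper took the field width (9), while the generic sign_extend32() takes the index of the sign bit (8). A worked example showing both decode the same 9-bit two's-complement noise-floor field; the raw value is made up:

    #include <stdio.h>
    #include <stdint.h>

    static int sign_extend_old(int val, int nbits)       /* removed driver helper */
    {
    	int order = 1 << (nbits - 1);

    	return (val ^ order) - order;
    }

    static int32_t sign_extend32(uint32_t value, int index)   /* kernel-style */
    {
    	uint8_t shift = 31 - index;

    	return (int32_t)(value << shift) >> shift;
    }

    int main(void)
    {
    	uint32_t raw = 0x1a5;   /* 9-bit field with the sign bit set */

    	printf("old: %d  new: %d\n",
    	       sign_extend_old(raw, 9), sign_extend32(raw, 8));  /* both -91 */
    	return 0;
    }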
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 63ccb39cdcd..29a045da184 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -834,10 +834,10 @@ void ath9k_hw_ani_setup(struct ath_hw *ah)
834{ 834{
835 int i; 835 int i;
836 836
837 const int totalSizeDesired[] = { -55, -55, -55, -55, -62 }; 837 static const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
838 const int coarseHigh[] = { -14, -14, -14, -14, -12 }; 838 static const int coarseHigh[] = { -14, -14, -14, -14, -12 };
839 const int coarseLow[] = { -64, -64, -64, -64, -70 }; 839 static const int coarseLow[] = { -64, -64, -64, -64, -70 };
840 const int firpwr[] = { -78, -78, -78, -78, -80 }; 840 static const int firpwr[] = { -78, -78, -78, -78, -80 };
841 841
842 for (i = 0; i < 5; i++) { 842 for (i = 0; i < 5; i++) {
843 ah->totalSizeDesired[i] = totalSizeDesired[i]; 843 ah->totalSizeDesired[i] = totalSizeDesired[i];
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index ea9f4497f58..06e34d293dc 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -244,13 +244,15 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
244 int upper, lower, cur_vit_mask; 244 int upper, lower, cur_vit_mask;
245 int tmp, new; 245 int tmp, new;
246 int i; 246 int i;
247 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, 247 static int pilot_mask_reg[4] = {
248 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 248 AR_PHY_TIMING7, AR_PHY_TIMING8,
249 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
249 }; 250 };
250 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, 251 static int chan_mask_reg[4] = {
251 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 252 AR_PHY_TIMING9, AR_PHY_TIMING10,
253 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
252 }; 254 };
253 int inc[4] = { 0, 100, 0, 0 }; 255 static int inc[4] = { 0, 100, 0, 0 };
254 256
255 int8_t mask_m[123]; 257 int8_t mask_m[123];
256 int8_t mask_p[123]; 258 int8_t mask_p[123];
@@ -873,7 +875,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
873 channel->max_antenna_gain * 2, 875 channel->max_antenna_gain * 2,
874 channel->max_power * 2, 876 channel->max_power * 2,
875 min((u32) MAX_RATE_POWER, 877 min((u32) MAX_RATE_POWER,
876 (u32) regulatory->power_limit)); 878 (u32) regulatory->power_limit), false);
877 879
878 /* Write analog registers */ 880 /* Write analog registers */
879 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { 881 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
@@ -1084,12 +1086,12 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1084 break; 1086 break;
1085 } 1087 }
1086 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ 1088 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
1087 const int m1ThreshLow[] = { 127, 50 }; 1089 static const int m1ThreshLow[] = { 127, 50 };
1088 const int m2ThreshLow[] = { 127, 40 }; 1090 static const int m2ThreshLow[] = { 127, 40 };
1089 const int m1Thresh[] = { 127, 0x4d }; 1091 static const int m1Thresh[] = { 127, 0x4d };
1090 const int m2Thresh[] = { 127, 0x40 }; 1092 static const int m2Thresh[] = { 127, 0x40 };
1091 const int m2CountThr[] = { 31, 16 }; 1093 static const int m2CountThr[] = { 31, 16 };
1092 const int m2CountThrLow[] = { 63, 48 }; 1094 static const int m2CountThrLow[] = { 63, 48 };
1093 u32 on = param ? 1 : 0; 1095 u32 on = param ? 1 : 0;
1094 1096
1095 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, 1097 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
@@ -1141,7 +1143,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1141 break; 1143 break;
1142 } 1144 }
1143 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{ 1145 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
1144 const int weakSigThrCck[] = { 8, 6 }; 1146 static const int weakSigThrCck[] = { 8, 6 };
1145 u32 high = param ? 1 : 0; 1147 u32 high = param ? 1 : 0;
1146 1148
1147 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT, 1149 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
@@ -1157,7 +1159,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1157 break; 1159 break;
1158 } 1160 }
1159 case ATH9K_ANI_FIRSTEP_LEVEL:{ 1161 case ATH9K_ANI_FIRSTEP_LEVEL:{
1160 const int firstep[] = { 0, 4, 8 }; 1162 static const int firstep[] = { 0, 4, 8 };
1161 u32 level = param; 1163 u32 level = param;
1162 1164
1163 if (level >= ARRAY_SIZE(firstep)) { 1165 if (level >= ARRAY_SIZE(firstep)) {
@@ -1178,7 +1180,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1178 break; 1180 break;
1179 } 1181 }
1180 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{ 1182 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
1181 const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 }; 1183 static const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
1182 u32 level = param; 1184 u32 level = param;
1183 1185
1184 if (level >= ARRAY_SIZE(cycpwrThr1)) { 1186 if (level >= ARRAY_SIZE(cycpwrThr1)) {
@@ -1490,25 +1492,25 @@ static void ar5008_hw_do_getnf(struct ath_hw *ah,
1490 int16_t nf; 1492 int16_t nf;
1491 1493
1492 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR); 1494 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
1493 nfarray[0] = sign_extend(nf, 9); 1495 nfarray[0] = sign_extend32(nf, 8);
1494 1496
1495 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR); 1497 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
1496 nfarray[1] = sign_extend(nf, 9); 1498 nfarray[1] = sign_extend32(nf, 8);
1497 1499
1498 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR); 1500 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
1499 nfarray[2] = sign_extend(nf, 9); 1501 nfarray[2] = sign_extend32(nf, 8);
1500 1502
1501 if (!IS_CHAN_HT40(ah->curchan)) 1503 if (!IS_CHAN_HT40(ah->curchan))
1502 return; 1504 return;
1503 1505
1504 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR); 1506 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
1505 nfarray[3] = sign_extend(nf, 9); 1507 nfarray[3] = sign_extend32(nf, 8);
1506 1508
1507 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR); 1509 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
1508 nfarray[4] = sign_extend(nf, 9); 1510 nfarray[4] = sign_extend32(nf, 8);
1509 1511
1510 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR); 1512 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
1511 nfarray[5] = sign_extend(nf, 9); 1513 nfarray[5] = sign_extend32(nf, 8);
1512} 1514}
1513 1515
1514/* 1516/*
@@ -1579,10 +1581,55 @@ static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
1579 ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_5416_5GHZ; 1581 ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_5416_5GHZ;
1580} 1582}
1581 1583
1584static void ar5008_hw_set_radar_params(struct ath_hw *ah,
1585 struct ath_hw_radar_conf *conf)
1586{
1587 u32 radar_0 = 0, radar_1 = 0;
1588
1589 if (!conf) {
1590 REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
1591 return;
1592 }
1593
1594 radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
1595 radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
1596 radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
1597 radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
1598 radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
1599 radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
1600
1601 radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
1602 radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
1603 radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
1604 radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
1605 radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);
1606
1607 REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
1608 REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
1609 if (conf->ext_channel)
1610 REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
1611 else
1612 REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
1613}
1614
1615static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
1616{
1617 struct ath_hw_radar_conf *conf = &ah->radar_conf;
1618
1619 conf->fir_power = -33;
1620 conf->radar_rssi = 20;
1621 conf->pulse_height = 10;
1622 conf->pulse_rssi = 24;
1623 conf->pulse_inband = 15;
1624 conf->pulse_maxlen = 255;
1625 conf->pulse_inband_step = 12;
1626 conf->radar_inband = 8;
1627}
1628
1582void ar5008_hw_attach_phy_ops(struct ath_hw *ah) 1629void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1583{ 1630{
1584 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 1631 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1585 const u32 ar5416_cca_regs[6] = { 1632 static const u32 ar5416_cca_regs[6] = {
1586 AR_PHY_CCA, 1633 AR_PHY_CCA,
1587 AR_PHY_CH1_CCA, 1634 AR_PHY_CH1_CCA,
1588 AR_PHY_CH2_CCA, 1635 AR_PHY_CH2_CCA,
@@ -1609,6 +1656,7 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1609 priv_ops->restore_chainmask = ar5008_restore_chainmask; 1656 priv_ops->restore_chainmask = ar5008_restore_chainmask;
1610 priv_ops->set_diversity = ar5008_set_diversity; 1657 priv_ops->set_diversity = ar5008_set_diversity;
1611 priv_ops->do_getnf = ar5008_hw_do_getnf; 1658 priv_ops->do_getnf = ar5008_hw_do_getnf;
1659 priv_ops->set_radar_params = ar5008_hw_set_radar_params;
1612 1660
1613 if (modparam_force_new_ani) { 1661 if (modparam_force_new_ani) {
1614 priv_ops->ani_control = ar5008_hw_ani_control_new; 1662 priv_ops->ani_control = ar5008_hw_ani_control_new;
@@ -1624,5 +1672,6 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1624 priv_ops->compute_pll_control = ar5008_hw_compute_pll_control; 1672 priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
1625 1673
1626 ar5008_hw_set_nf_limits(ah); 1674 ar5008_hw_set_nf_limits(ah);
1675 ar5008_hw_set_radar_conf(ah);
1627 memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs)); 1676 memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs));
1628} 1677}
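The new ar5008_hw_set_radar_params() builds AR_PHY_RADAR_0/1 by OR-ing SM(value, FIELD) terms, where each FIELD comes with a FIELD_S shift. A standalone sketch of that shift-mask idiom; the FOO field definitions are invented for the example, only the macro pattern mirrors the driver:

    #include <stdio.h>
    #include <stdint.h>

    #define FOO_PWR      0x0000003f  /* bits 5:0  */
    #define FOO_PWR_S    0
    #define FOO_RSSI     0x00003f00  /* bits 13:8 */
    #define FOO_RSSI_S   8

    #define SM(_v, _f)   (((_v) << _f##_S) & (_f))   /* pack value into field */
    #define MS(_v, _f)   (((_v) & (_f)) >> _f##_S)   /* extract field value   */

    int main(void)
    {
    	uint32_t reg = 0;

    	reg |= SM(0x21, FOO_PWR);
    	reg |= SM(0x14, FOO_RSSI);

    	printf("reg=%08x pwr=%#x rssi=%#x\n",
    	       reg, MS(reg, FOO_PWR), MS(reg, FOO_RSSI));
    	return 0;
    }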
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 50dda394f8b..f0268e5eab3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -90,13 +90,10 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
90 90
91 *masked = isr & ATH9K_INT_COMMON; 91 *masked = isr & ATH9K_INT_COMMON;
92 92
93 if (ah->config.rx_intr_mitigation) { 93 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM |
94 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) 94 AR_ISR_RXOK | AR_ISR_RXERR))
95 *masked |= ATH9K_INT_RX;
96 }
97
98 if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
99 *masked |= ATH9K_INT_RX; 95 *masked |= ATH9K_INT_RX;
96
100 if (isr & 97 if (isr &
101 (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR | 98 (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
102 AR_ISR_TXEOL)) { 99 AR_ISR_TXEOL)) {
@@ -118,14 +115,6 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
118 "receive FIFO overrun interrupt\n"); 115 "receive FIFO overrun interrupt\n");
119 } 116 }
120 117
121 if (!AR_SREV_9100(ah)) {
122 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
123 u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
124 if (isr5 & AR_ISR_S5_TIM_TIMER)
125 *masked |= ATH9K_INT_TIM_TIMER;
126 }
127 }
128
129 *masked |= mask2; 118 *masked |= mask2;
130 } 119 }
131 120
@@ -136,17 +125,18 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
136 u32 s5_s; 125 u32 s5_s;
137 126
138 s5_s = REG_READ(ah, AR_ISR_S5_S); 127 s5_s = REG_READ(ah, AR_ISR_S5_S);
139 if (isr & AR_ISR_GENTMR) { 128 ah->intr_gen_timer_trigger =
140 ah->intr_gen_timer_trigger =
141 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); 129 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
142 130
143 ah->intr_gen_timer_thresh = 131 ah->intr_gen_timer_thresh =
144 MS(s5_s, AR_ISR_S5_GENTIMER_THRESH); 132 MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
145 133
146 if (ah->intr_gen_timer_trigger) 134 if (ah->intr_gen_timer_trigger)
147 *masked |= ATH9K_INT_GENTIMER; 135 *masked |= ATH9K_INT_GENTIMER;
148 136
149 } 137 if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
138 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
139 *masked |= ATH9K_INT_TIM_TIMER;
150 } 140 }
151 141
152 if (sync_cause) { 142 if (sync_cause) {
@@ -218,77 +208,70 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
218 struct ath_tx_status *ts) 208 struct ath_tx_status *ts)
219{ 209{
220 struct ar5416_desc *ads = AR5416DESC(ds); 210 struct ar5416_desc *ads = AR5416DESC(ds);
211 u32 status;
221 212
222 if ((ads->ds_txstatus9 & AR_TxDone) == 0) 213 status = ACCESS_ONCE(ads->ds_txstatus9);
214 if ((status & AR_TxDone) == 0)
223 return -EINPROGRESS; 215 return -EINPROGRESS;
224 216
225 ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
226 ts->ts_tstamp = ads->AR_SendTimestamp; 217 ts->ts_tstamp = ads->AR_SendTimestamp;
227 ts->ts_status = 0; 218 ts->ts_status = 0;
228 ts->ts_flags = 0; 219 ts->ts_flags = 0;
229 220
230 if (ads->ds_txstatus1 & AR_FrmXmitOK) 221 if (status & AR_TxOpExceeded)
222 ts->ts_status |= ATH9K_TXERR_XTXOP;
223 ts->tid = MS(status, AR_TxTid);
224 ts->ts_rateindex = MS(status, AR_FinalTxIdx);
225 ts->ts_seqnum = MS(status, AR_SeqNum);
226
227 status = ACCESS_ONCE(ads->ds_txstatus0);
228 ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
229 ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
230 ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
231 if (status & AR_TxBaStatus) {
232 ts->ts_flags |= ATH9K_TX_BA;
233 ts->ba_low = ads->AR_BaBitmapLow;
234 ts->ba_high = ads->AR_BaBitmapHigh;
235 }
236
237 status = ACCESS_ONCE(ads->ds_txstatus1);
238 if (status & AR_FrmXmitOK)
231 ts->ts_status |= ATH9K_TX_ACKED; 239 ts->ts_status |= ATH9K_TX_ACKED;
232 if (ads->ds_txstatus1 & AR_ExcessiveRetries) 240 else {
233 ts->ts_status |= ATH9K_TXERR_XRETRY; 241 if (status & AR_ExcessiveRetries)
234 if (ads->ds_txstatus1 & AR_Filtered) 242 ts->ts_status |= ATH9K_TXERR_XRETRY;
235 ts->ts_status |= ATH9K_TXERR_FILT; 243 if (status & AR_Filtered)
236 if (ads->ds_txstatus1 & AR_FIFOUnderrun) { 244 ts->ts_status |= ATH9K_TXERR_FILT;
237 ts->ts_status |= ATH9K_TXERR_FIFO; 245 if (status & AR_FIFOUnderrun) {
238 ath9k_hw_updatetxtriglevel(ah, true); 246 ts->ts_status |= ATH9K_TXERR_FIFO;
247 ath9k_hw_updatetxtriglevel(ah, true);
248 }
239 } 249 }
240 if (ads->ds_txstatus9 & AR_TxOpExceeded) 250 if (status & AR_TxTimerExpired)
241 ts->ts_status |= ATH9K_TXERR_XTXOP;
242 if (ads->ds_txstatus1 & AR_TxTimerExpired)
243 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED; 251 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
244 252 if (status & AR_DescCfgErr)
245 if (ads->ds_txstatus1 & AR_DescCfgErr)
246 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR; 253 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
247 if (ads->ds_txstatus1 & AR_TxDataUnderrun) { 254 if (status & AR_TxDataUnderrun) {
248 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN; 255 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
249 ath9k_hw_updatetxtriglevel(ah, true); 256 ath9k_hw_updatetxtriglevel(ah, true);
250 } 257 }
251 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) { 258 if (status & AR_TxDelimUnderrun) {
252 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN; 259 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
253 ath9k_hw_updatetxtriglevel(ah, true); 260 ath9k_hw_updatetxtriglevel(ah, true);
254 } 261 }
255 if (ads->ds_txstatus0 & AR_TxBaStatus) { 262 ts->ts_shortretry = MS(status, AR_RTSFailCnt);
256 ts->ts_flags |= ATH9K_TX_BA; 263 ts->ts_longretry = MS(status, AR_DataFailCnt);
257 ts->ba_low = ads->AR_BaBitmapLow; 264 ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
258 ts->ba_high = ads->AR_BaBitmapHigh;
259 }
260 265
261 ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx); 266 status = ACCESS_ONCE(ads->ds_txstatus5);
262 switch (ts->ts_rateindex) { 267 ts->ts_rssi = MS(status, AR_TxRSSICombined);
263 case 0: 268 ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
264 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0); 269 ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
265 break; 270 ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);
266 case 1:
267 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
268 break;
269 case 2:
270 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
271 break;
272 case 3:
273 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
274 break;
275 }
276 271
277 ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
278 ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
279 ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
280 ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
281 ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
282 ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
283 ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
284 ts->evm0 = ads->AR_TxEVM0; 272 ts->evm0 = ads->AR_TxEVM0;
285 ts->evm1 = ads->AR_TxEVM1; 273 ts->evm1 = ads->AR_TxEVM1;
286 ts->evm2 = ads->AR_TxEVM2; 274 ts->evm2 = ads->AR_TxEVM2;
287 ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
288 ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
289 ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
290 ts->tid = MS(ads->ds_txstatus9, AR_TxTid);
291 ts->ts_antenna = 0;
292 275
293 return 0; 276 return 0;
294} 277}
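The rewritten ar9002_hw_proc_txdesc() reads each DMA-updated status word once through ACCESS_ONCE() and decodes every flag from that snapshot. A minimal sketch of the pattern under that assumption; the descriptor layout, field names and helper are invented, only the snapshot-then-decode structure follows the hunk:

    #include <stdio.h>
    #include <stdint.h>

    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

    #define TX_DONE     0x00000001
    #define TX_OK       0x00000002
    #define TX_RETRY    0x000000f0
    #define TX_RETRY_S  4

    #define MS(_v, _f)  (((_v) & (_f)) >> _f##_S)

    struct fake_desc { uint32_t status; };   /* stand-in for ar5416_desc */

    static int proc_txdesc(struct fake_desc *ds)
    {
    	uint32_t status = ACCESS_ONCE(ds->status);   /* one load, then decode */

    	if (!(status & TX_DONE))
    		return -1;                           /* still in progress */

    	printf("ok=%d retries=%u\n",
    	       !!(status & TX_OK), MS(status, TX_RETRY));
    	return 0;
    }

    int main(void)
    {
    	struct fake_desc d = { .status = TX_DONE | TX_OK | (3 << TX_RETRY_S) };

    	return proc_txdesc(&d);
    }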
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index c00cdc67b55..7ae66a889f5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -175,13 +175,15 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
175 int upper, lower, cur_vit_mask; 175 int upper, lower, cur_vit_mask;
176 int tmp, newVal; 176 int tmp, newVal;
177 int i; 177 int i;
178 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, 178 static const int pilot_mask_reg[4] = {
179 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 179 AR_PHY_TIMING7, AR_PHY_TIMING8,
180 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
180 }; 181 };
181 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, 182 static const int chan_mask_reg[4] = {
182 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 183 AR_PHY_TIMING9, AR_PHY_TIMING10,
184 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
183 }; 185 };
184 int inc[4] = { 0, 100, 0, 0 }; 186 static const int inc[4] = { 0, 100, 0, 0 };
185 struct chan_centers centers; 187 struct chan_centers centers;
186 188
187 int8_t mask_m[123]; 189 int8_t mask_m[123];
@@ -473,21 +475,21 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
473 int16_t nf; 475 int16_t nf;
474 476
475 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR); 477 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
476 nfarray[0] = sign_extend(nf, 9); 478 nfarray[0] = sign_extend32(nf, 8);
477 479
478 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR); 480 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
479 if (IS_CHAN_HT40(ah->curchan)) 481 if (IS_CHAN_HT40(ah->curchan))
480 nfarray[3] = sign_extend(nf, 9); 482 nfarray[3] = sign_extend32(nf, 8);
481 483
482 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) 484 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
483 return; 485 return;
484 486
485 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR); 487 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
486 nfarray[1] = sign_extend(nf, 9); 488 nfarray[1] = sign_extend32(nf, 8);
487 489
488 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR); 490 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR);
489 if (IS_CHAN_HT40(ah->curchan)) 491 if (IS_CHAN_HT40(ah->curchan))
490 nfarray[4] = sign_extend(nf, 9); 492 nfarray[4] = sign_extend32(nf, 8);
491} 493}
492 494
493static void ar9002_hw_set_nf_limits(struct ath_hw *ah) 495static void ar9002_hw_set_nf_limits(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 9e6edffe0bd..4c94c9ed5f8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -196,7 +196,7 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
196 u32 qCoffDenom, iCoffDenom; 196 u32 qCoffDenom, iCoffDenom;
197 int32_t qCoff, iCoff; 197 int32_t qCoff, iCoff;
198 int iqCorrNeg, i; 198 int iqCorrNeg, i;
199 const u_int32_t offset_array[3] = { 199 static const u_int32_t offset_array[3] = {
200 AR_PHY_RX_IQCAL_CORR_B0, 200 AR_PHY_RX_IQCAL_CORR_B0,
201 AR_PHY_RX_IQCAL_CORR_B1, 201 AR_PHY_RX_IQCAL_CORR_B1,
202 AR_PHY_RX_IQCAL_CORR_B2, 202 AR_PHY_RX_IQCAL_CORR_B2,
@@ -603,22 +603,22 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
603static void ar9003_hw_tx_iq_cal(struct ath_hw *ah) 603static void ar9003_hw_tx_iq_cal(struct ath_hw *ah)
604{ 604{
605 struct ath_common *common = ath9k_hw_common(ah); 605 struct ath_common *common = ath9k_hw_common(ah);
606 const u32 txiqcal_status[AR9300_MAX_CHAINS] = { 606 static const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
607 AR_PHY_TX_IQCAL_STATUS_B0, 607 AR_PHY_TX_IQCAL_STATUS_B0,
608 AR_PHY_TX_IQCAL_STATUS_B1, 608 AR_PHY_TX_IQCAL_STATUS_B1,
609 AR_PHY_TX_IQCAL_STATUS_B2, 609 AR_PHY_TX_IQCAL_STATUS_B2,
610 }; 610 };
611 const u32 tx_corr_coeff[AR9300_MAX_CHAINS] = { 611 static const u32 tx_corr_coeff[AR9300_MAX_CHAINS] = {
612 AR_PHY_TX_IQCAL_CORR_COEFF_01_B0, 612 AR_PHY_TX_IQCAL_CORR_COEFF_01_B0,
613 AR_PHY_TX_IQCAL_CORR_COEFF_01_B1, 613 AR_PHY_TX_IQCAL_CORR_COEFF_01_B1,
614 AR_PHY_TX_IQCAL_CORR_COEFF_01_B2, 614 AR_PHY_TX_IQCAL_CORR_COEFF_01_B2,
615 }; 615 };
616 const u32 rx_corr[AR9300_MAX_CHAINS] = { 616 static const u32 rx_corr[AR9300_MAX_CHAINS] = {
617 AR_PHY_RX_IQCAL_CORR_B0, 617 AR_PHY_RX_IQCAL_CORR_B0,
618 AR_PHY_RX_IQCAL_CORR_B1, 618 AR_PHY_RX_IQCAL_CORR_B1,
619 AR_PHY_RX_IQCAL_CORR_B2, 619 AR_PHY_RX_IQCAL_CORR_B2,
620 }; 620 };
621 const u_int32_t chan_info_tab[] = { 621 static const u_int32_t chan_info_tab[] = {
622 AR_PHY_CHAN_INFO_TAB_0, 622 AR_PHY_CHAN_INFO_TAB_0,
623 AR_PHY_CHAN_INFO_TAB_1, 623 AR_PHY_CHAN_INFO_TAB_1,
624 AR_PHY_CHAN_INFO_TAB_2, 624 AR_PHY_CHAN_INFO_TAB_2,
@@ -718,12 +718,19 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
718 struct ath9k_channel *chan) 718 struct ath9k_channel *chan)
719{ 719{
720 struct ath_common *common = ath9k_hw_common(ah); 720 struct ath_common *common = ath9k_hw_common(ah);
721 int val;
721 722
722 /* 723 val = REG_READ(ah, AR_ENT_OTP);
723 * 0x7 = 0b111 , AR9003 needs to be configured for 3-chain mode before 724 ath_print(common, ATH_DBG_CALIBRATE, "ath9k: AR_ENT_OTP 0x%x\n", val);
724 * running AGC/TxIQ cals 725
725 */ 726 if (val & AR_ENT_OTP_CHAIN2_DISABLE)
726 ar9003_hw_set_chain_masks(ah, 0x7, 0x7); 727 ar9003_hw_set_chain_masks(ah, 0x3, 0x3);
728 else
729 /*
730 * 0x7 = 0b111 , AR9003 needs to be configured for 3-chain
731 * mode before running AGC/TxIQ cals
732 */
733 ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
727 734
728 /* Do Tx IQ Calibration */ 735 /* Do Tx IQ Calibration */
729 ar9003_hw_tx_iq_cal(ah); 736 ar9003_hw_tx_iq_cal(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index a7b82f0085d..f2eec388693 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -22,12 +22,14 @@
22#define COMP_CKSUM_LEN 2 22#define COMP_CKSUM_LEN 2
23 23
24#define AR_CH0_TOP (0x00016288) 24#define AR_CH0_TOP (0x00016288)
25#define AR_CH0_TOP_XPABIASLVL (0x3) 25#define AR_CH0_TOP_XPABIASLVL (0x300)
26#define AR_CH0_TOP_XPABIASLVL_S (8) 26#define AR_CH0_TOP_XPABIASLVL_S (8)
27 27
28#define AR_CH0_THERM (0x00016290) 28#define AR_CH0_THERM (0x00016290)
29#define AR_CH0_THERM_SPARE (0x3f) 29#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
30#define AR_CH0_THERM_SPARE_S (0) 30#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
31#define AR_CH0_THERM_XPASHORT2GND 0x4
32#define AR_CH0_THERM_XPASHORT2GND_S 2
31 33
32#define AR_SWITCH_TABLE_COM_ALL (0xffff) 34#define AR_SWITCH_TABLE_COM_ALL (0xffff)
33#define AR_SWITCH_TABLE_COM_ALL_S (0) 35#define AR_SWITCH_TABLE_COM_ALL_S (0)
@@ -55,6 +57,9 @@
55#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ 57#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */
56#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ 58#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */
57 59
60static int ar9003_hw_power_interpolate(int32_t x,
61 int32_t *px, int32_t *py, u_int16_t np);
62
58#define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6)) 63#define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6))
59 64
60static const struct ar9300_eeprom ar9300_default = { 65static const struct ar9300_eeprom ar9300_default = {
@@ -146,13 +151,16 @@ static const struct ar9300_eeprom ar9300_default = {
146 .txEndToRxOn = 0x2, 151 .txEndToRxOn = 0x2,
147 .txFrameToXpaOn = 0xe, 152 .txFrameToXpaOn = 0xe,
148 .thresh62 = 28, 153 .thresh62 = 28,
149 .papdRateMaskHt20 = LE32(0x80c080), 154 .papdRateMaskHt20 = LE32(0x0cf0e0e0),
150 .papdRateMaskHt40 = LE32(0x80c080), 155 .papdRateMaskHt40 = LE32(0x6cf0e0e0),
151 .futureModal = { 156 .futureModal = {
152 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 157 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
153 0, 0, 0, 0, 0, 0, 0, 0
154 }, 158 },
155 }, 159 },
160 .base_ext1 = {
161 .ant_div_control = 0,
162 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
163 },
156 .calFreqPier2G = { 164 .calFreqPier2G = {
157 FREQ2FBIN(2412, 1), 165 FREQ2FBIN(2412, 1),
158 FREQ2FBIN(2437, 1), 166 FREQ2FBIN(2437, 1),
@@ -287,8 +295,7 @@ static const struct ar9300_eeprom ar9300_default = {
287 /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), 295 /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
288 /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), 296 /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
289 /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), 297 /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
290 /* Data[11].ctlEdges[3].bChannel */ 298 /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
291 FREQ2FBIN(2462, 1),
292 } 299 }
293 }, 300 },
294 .ctlPowerData_2G = { 301 .ctlPowerData_2G = {
@@ -346,13 +353,20 @@ static const struct ar9300_eeprom ar9300_default = {
346 .txEndToRxOn = 0x2, 353 .txEndToRxOn = 0x2,
347 .txFrameToXpaOn = 0xe, 354 .txFrameToXpaOn = 0xe,
348 .thresh62 = 28, 355 .thresh62 = 28,
349 .papdRateMaskHt20 = LE32(0xf0e0e0), 356 .papdRateMaskHt20 = LE32(0x0c80c080),
350 .papdRateMaskHt40 = LE32(0xf0e0e0), 357 .papdRateMaskHt40 = LE32(0x0080c080),
351 .futureModal = { 358 .futureModal = {
352 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 359 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
353 0, 0, 0, 0, 0, 0, 0, 0
354 }, 360 },
355 }, 361 },
362 .base_ext2 = {
363 .tempSlopeLow = 0,
364 .tempSlopeHigh = 0,
365 .xatten1DBLow = {0, 0, 0},
366 .xatten1MarginLow = {0, 0, 0},
367 .xatten1DBHigh = {0, 0, 0},
368 .xatten1MarginHigh = {0, 0, 0}
369 },
356 .calFreqPier5G = { 370 .calFreqPier5G = {
357 FREQ2FBIN(5180, 0), 371 FREQ2FBIN(5180, 0),
358 FREQ2FBIN(5220, 0), 372 FREQ2FBIN(5220, 0),
@@ -626,6 +640,2338 @@ static const struct ar9300_eeprom ar9300_default = {
626 } 640 }
627}; 641};
628 642
643static const struct ar9300_eeprom ar9300_x113 = {
644 .eepromVersion = 2,
645 .templateVersion = 6,
646 .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
647 .custData = {"x113-023-f0000"},
648 .baseEepHeader = {
649 .regDmn = { LE16(0), LE16(0x1f) },
650 .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
651 .opCapFlags = {
652 .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
653 .eepMisc = 0,
654 },
655 .rfSilent = 0,
656 .blueToothOptions = 0,
657 .deviceCap = 0,
658 .deviceType = 5, /* takes lower byte in eeprom location */
659 .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
660 .params_for_tuning_caps = {0, 0},
661 .featureEnable = 0x0d,
662 /*
663 * bit0 - enable tx temp comp - disabled
664 * bit1 - enable tx volt comp - disabled
665 * bit2 - enable fastClock - enabled
666 * bit3 - enable doubling - enabled
667 * bit4 - enable internal regulator - disabled
668 * bit5 - enable pa predistortion - disabled
669 */
670 .miscConfiguration = 0, /* bit0 - turn down drivestrength */
671 .eepromWriteEnableGpio = 6,
672 .wlanDisableGpio = 0,
673 .wlanLedGpio = 8,
674 .rxBandSelectGpio = 0xff,
675 .txrxgain = 0x21,
676 .swreg = 0,
677 },
678 .modalHeader2G = {
679 /* ar9300_modal_eep_header 2g */
680 /* 4 idle,t1,t2,b(4 bits per setting) */
681 .antCtrlCommon = LE32(0x110),
682 /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
683 .antCtrlCommon2 = LE32(0x44444),
684
685 /*
686 * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
687 * rx1, rx12, b (2 bits each)
688 */
689 .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
690
691 /*
692 * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
693 * for ar9280 (0xa20c/b20c 5:0)
694 */
695 .xatten1DB = {0, 0, 0},
696
697 /*
698 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
699 * for ar9280 (0xa20c/b20c 16:12
700 */
701 .xatten1Margin = {0, 0, 0},
702 .tempSlope = 25,
703 .voltSlope = 0,
704
705 /*
706 * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
707 * channels in usual fbin coding format
708 */
709 .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
710
711 /*
712 * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
713 * if the register is per chain
714 */
715 .noiseFloorThreshCh = {-1, 0, 0},
716 .ob = {1, 1, 1},/* 3 chain */
717 .db_stage2 = {1, 1, 1}, /* 3 chain */
718 .db_stage3 = {0, 0, 0},
719 .db_stage4 = {0, 0, 0},
720 .xpaBiasLvl = 0,
721 .txFrameToDataStart = 0x0e,
722 .txFrameToPaOn = 0x0e,
723 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
724 .antennaGain = 0,
725 .switchSettling = 0x2c,
726 .adcDesiredSize = -30,
727 .txEndToXpaOff = 0,
728 .txEndToRxOn = 0x2,
729 .txFrameToXpaOn = 0xe,
730 .thresh62 = 28,
731 .papdRateMaskHt20 = LE32(0x0c80c080),
732 .papdRateMaskHt40 = LE32(0x0080c080),
733 .futureModal = {
734 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
735 },
736 },
737 .base_ext1 = {
738 .ant_div_control = 0,
739 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
740 },
741 .calFreqPier2G = {
742 FREQ2FBIN(2412, 1),
743 FREQ2FBIN(2437, 1),
744 FREQ2FBIN(2472, 1),
745 },
746 /* ar9300_cal_data_per_freq_op_loop 2g */
747 .calPierData2G = {
748 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
749 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
750 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
751 },
752 .calTarget_freqbin_Cck = {
753 FREQ2FBIN(2412, 1),
754 FREQ2FBIN(2472, 1),
755 },
756 .calTarget_freqbin_2G = {
757 FREQ2FBIN(2412, 1),
758 FREQ2FBIN(2437, 1),
759 FREQ2FBIN(2472, 1)
760 },
761 .calTarget_freqbin_2GHT20 = {
762 FREQ2FBIN(2412, 1),
763 FREQ2FBIN(2437, 1),
764 FREQ2FBIN(2472, 1)
765 },
766 .calTarget_freqbin_2GHT40 = {
767 FREQ2FBIN(2412, 1),
768 FREQ2FBIN(2437, 1),
769 FREQ2FBIN(2472, 1)
770 },
771 .calTargetPowerCck = {
772 /* 1L-5L,5S,11L,11S */
773 { {34, 34, 34, 34} },
774 { {34, 34, 34, 34} },
775 },
776 .calTargetPower2G = {
777 /* 6-24,36,48,54 */
778 { {34, 34, 32, 32} },
779 { {34, 34, 32, 32} },
780 { {34, 34, 32, 32} },
781 },
782 .calTargetPower2GHT20 = {
783 { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
784 { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
785 { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
786 },
787 .calTargetPower2GHT40 = {
788 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
789 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
790 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
791 },
792 .ctlIndex_2G = {
793 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
794 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
795 },
796 .ctl_freqbin_2G = {
797 {
798 FREQ2FBIN(2412, 1),
799 FREQ2FBIN(2417, 1),
800 FREQ2FBIN(2457, 1),
801 FREQ2FBIN(2462, 1)
802 },
803 {
804 FREQ2FBIN(2412, 1),
805 FREQ2FBIN(2417, 1),
806 FREQ2FBIN(2462, 1),
807 0xFF,
808 },
809
810 {
811 FREQ2FBIN(2412, 1),
812 FREQ2FBIN(2417, 1),
813 FREQ2FBIN(2462, 1),
814 0xFF,
815 },
816 {
817 FREQ2FBIN(2422, 1),
818 FREQ2FBIN(2427, 1),
819 FREQ2FBIN(2447, 1),
820 FREQ2FBIN(2452, 1)
821 },
822
823 {
824 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
825 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
826 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
827 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
828 },
829
830 {
831 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
832 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
833 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
834 0,
835 },
836
837 {
838 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
839 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
840 FREQ2FBIN(2472, 1),
841 0,
842 },
843
844 {
845 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
846 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
847 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
848 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
849 },
850
851 {
852 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
853 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
854 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
855 },
856
857 {
858 /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
859 /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
860 /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
861 0
862 },
863
864 {
865 /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
866 /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
867 /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
868 0
869 },
870
871 {
872 /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
873 /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
874 /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
875 /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
876 }
877 },
878 .ctlPowerData_2G = {
879 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
880 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
881 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
882
883 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
884 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
885 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
886
887 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
888 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
889 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
890
891 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
892 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
893 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
894 },
895 .modalHeader5G = {
896 /* 4 idle,t1,t2,b (4 bits per setting) */
897 .antCtrlCommon = LE32(0x220),
898 /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
899 .antCtrlCommon2 = LE32(0x11111),
900 /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
901 .antCtrlChain = {
902 LE16(0x150), LE16(0x150), LE16(0x150),
903 },
904 /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
905 .xatten1DB = {0, 0, 0},
906
907 /*
908 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
909 * for merlin (0xa20c/b20c 16:12
910 */
911 .xatten1Margin = {0, 0, 0},
912 .tempSlope = 68,
913 .voltSlope = 0,
914 /* spurChans spur channels in usual fbin coding format */
915 .spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0},
916 /* noiseFloorThreshCh Check if the register is per chain */
917 .noiseFloorThreshCh = {-1, 0, 0},
918 .ob = {3, 3, 3}, /* 3 chain */
919 .db_stage2 = {3, 3, 3}, /* 3 chain */
920 .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
921 .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
922 .xpaBiasLvl = 0,
923 .txFrameToDataStart = 0x0e,
924 .txFrameToPaOn = 0x0e,
925 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
926 .antennaGain = 0,
927 .switchSettling = 0x2d,
928 .adcDesiredSize = -30,
929 .txEndToXpaOff = 0,
930 .txEndToRxOn = 0x2,
931 .txFrameToXpaOn = 0xe,
932 .thresh62 = 28,
933 .papdRateMaskHt20 = LE32(0x0cf0e0e0),
934 .papdRateMaskHt40 = LE32(0x6cf0e0e0),
935 .futureModal = {
936 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
937 },
938 },
939 .base_ext2 = {
940 .tempSlopeLow = 72,
941 .tempSlopeHigh = 105,
942 .xatten1DBLow = {0, 0, 0},
943 .xatten1MarginLow = {0, 0, 0},
944 .xatten1DBHigh = {0, 0, 0},
945 .xatten1MarginHigh = {0, 0, 0}
946 },
947 .calFreqPier5G = {
948 FREQ2FBIN(5180, 0),
949 FREQ2FBIN(5240, 0),
950 FREQ2FBIN(5320, 0),
951 FREQ2FBIN(5400, 0),
952 FREQ2FBIN(5500, 0),
953 FREQ2FBIN(5600, 0),
954 FREQ2FBIN(5745, 0),
955 FREQ2FBIN(5785, 0)
956 },
957 .calPierData5G = {
958 {
959 {0, 0, 0, 0, 0},
960 {0, 0, 0, 0, 0},
961 {0, 0, 0, 0, 0},
962 {0, 0, 0, 0, 0},
963 {0, 0, 0, 0, 0},
964 {0, 0, 0, 0, 0},
965 {0, 0, 0, 0, 0},
966 {0, 0, 0, 0, 0},
967 },
968 {
969 {0, 0, 0, 0, 0},
970 {0, 0, 0, 0, 0},
971 {0, 0, 0, 0, 0},
972 {0, 0, 0, 0, 0},
973 {0, 0, 0, 0, 0},
974 {0, 0, 0, 0, 0},
975 {0, 0, 0, 0, 0},
976 {0, 0, 0, 0, 0},
977 },
978 {
979 {0, 0, 0, 0, 0},
980 {0, 0, 0, 0, 0},
981 {0, 0, 0, 0, 0},
982 {0, 0, 0, 0, 0},
983 {0, 0, 0, 0, 0},
984 {0, 0, 0, 0, 0},
985 {0, 0, 0, 0, 0},
986 {0, 0, 0, 0, 0},
987 },
988
989 },
990 .calTarget_freqbin_5G = {
991 FREQ2FBIN(5180, 0),
992 FREQ2FBIN(5220, 0),
993 FREQ2FBIN(5320, 0),
994 FREQ2FBIN(5400, 0),
995 FREQ2FBIN(5500, 0),
996 FREQ2FBIN(5600, 0),
997 FREQ2FBIN(5745, 0),
998 FREQ2FBIN(5785, 0)
999 },
1000 .calTarget_freqbin_5GHT20 = {
1001 FREQ2FBIN(5180, 0),
1002 FREQ2FBIN(5240, 0),
1003 FREQ2FBIN(5320, 0),
1004 FREQ2FBIN(5400, 0),
1005 FREQ2FBIN(5500, 0),
1006 FREQ2FBIN(5700, 0),
1007 FREQ2FBIN(5745, 0),
1008 FREQ2FBIN(5825, 0)
1009 },
1010 .calTarget_freqbin_5GHT40 = {
1011 FREQ2FBIN(5190, 0),
1012 FREQ2FBIN(5230, 0),
1013 FREQ2FBIN(5320, 0),
1014 FREQ2FBIN(5410, 0),
1015 FREQ2FBIN(5510, 0),
1016 FREQ2FBIN(5670, 0),
1017 FREQ2FBIN(5755, 0),
1018 FREQ2FBIN(5825, 0)
1019 },
1020 .calTargetPower5G = {
1021 /* 6-24,36,48,54 */
1022 { {42, 40, 40, 34} },
1023 { {42, 40, 40, 34} },
1024 { {42, 40, 40, 34} },
1025 { {42, 40, 40, 34} },
1026 { {42, 40, 40, 34} },
1027 { {42, 40, 40, 34} },
1028 { {42, 40, 40, 34} },
1029 { {42, 40, 40, 34} },
1030 },
1031 .calTargetPower5GHT20 = {
1032 /*
1033 * 0_8_16,1-3_9-11_17-19,
1034 * 4,5,6,7,12,13,14,15,20,21,22,23
1035 */
1036 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1037 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1038 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1039 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1040 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1041 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1042 { {38, 38, 38, 38, 32, 28, 38, 38, 32, 28, 38, 38, 32, 26} },
1043 { {36, 36, 36, 36, 32, 28, 36, 36, 32, 28, 36, 36, 32, 26} },
1044 },
1045 .calTargetPower5GHT40 = {
1046 /*
1047 * 0_8_16,1-3_9-11_17-19,
1048 * 4,5,6,7,12,13,14,15,20,21,22,23
1049 */
1050 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1051 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1052 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1053 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1054 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1055 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1056 { {36, 36, 36, 36, 30, 26, 36, 36, 30, 26, 36, 36, 30, 24} },
1057 { {34, 34, 34, 34, 30, 26, 34, 34, 30, 26, 34, 34, 30, 24} },
1058 },
1059 .ctlIndex_5G = {
1060 0x10, 0x16, 0x18, 0x40, 0x46,
1061 0x48, 0x30, 0x36, 0x38
1062 },
1063 .ctl_freqbin_5G = {
1064 {
1065 /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1066 /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1067 /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
1068 /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1069 /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
1070 /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1071 /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1072 /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1073 },
1074 {
1075 /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1076 /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1077 /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
1078 /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1079 /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
1080 /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1081 /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1082 /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1083 },
1084
1085 {
1086 /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1087 /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
1088 /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
1089 /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
1090 /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
1091 /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
1092 /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
1093 /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
1094 },
1095
1096 {
1097 /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1098 /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
1099 /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
1100 /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
1101 /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
1102 /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1103 /* Data[3].ctlEdges[6].bChannel */ 0xFF,
1104 /* Data[3].ctlEdges[7].bChannel */ 0xFF,
1105 },
1106
1107 {
1108 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1109 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1110 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
1111 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
1112 /* Data[4].ctlEdges[4].bChannel */ 0xFF,
1113 /* Data[4].ctlEdges[5].bChannel */ 0xFF,
1114 /* Data[4].ctlEdges[6].bChannel */ 0xFF,
1115 /* Data[4].ctlEdges[7].bChannel */ 0xFF,
1116 },
1117
1118 {
1119 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1120 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
1121 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
1122 /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
1123 /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
1124 /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
1125 /* Data[5].ctlEdges[6].bChannel */ 0xFF,
1126 /* Data[5].ctlEdges[7].bChannel */ 0xFF
1127 },
1128
1129 {
1130 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1131 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
1132 /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
1133 /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
1134 /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
1135 /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
1136 /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
1137 /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
1138 },
1139
1140 {
1141 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1142 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1143 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
1144 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1145 /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
1146 /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1147 /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1148 /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1149 },
1150
1151 {
1152 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1153 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
1154 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
1155 /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
1156 /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
1157 /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
1158 /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
1159 /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
1160 }
1161 },
1162 .ctlPowerData_5G = {
1163 {
1164 {
1165 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1166 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1167 }
1168 },
1169 {
1170 {
1171 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1172 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1173 }
1174 },
1175 {
1176 {
1177 CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1178 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1179 }
1180 },
1181 {
1182 {
1183 CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1184 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1185 }
1186 },
1187 {
1188 {
1189 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1190 CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1191 }
1192 },
1193 {
1194 {
1195 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1196 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1197 }
1198 },
1199 {
1200 {
1201 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1202 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1203 }
1204 },
1205 {
1206 {
1207 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1208 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1209 }
1210 },
1211 {
1212 {
1213 CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
1214 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1215 }
1216 },
1217 }
1218 };
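The template structures in this hunk are plain data; the non-obvious parts are the packing macros they rely on (FREQ2FBIN for the frequency-bin coding, CTL for the conformance-test-limit edges, LE16/LE32 for fixed-endian storage), whose definitions are not part of this hunk. As a standalone sketch only, assuming the usual ath9k fbin convention (2 GHz channels stored as freq - 2300, 5 GHz channels as (freq - 4800) / 5) and a CTL edge layout of a 6-bit power limit plus a flag bit, the coding can be illustrated like this:

/*
 * Illustration only: these macro bodies are assumptions, not the
 * definitions used by the patch above; the authoritative versions
 * live elsewhere in the ath9k sources.
 */
#include <stdio.h>

/* Assumed fbin coding: 2 GHz -> (freq - 2300), 5 GHz -> (freq - 4800) / 5 */
#define FREQ2FBIN(x, is_2ghz) ((is_2ghz) ? ((x) - 2300) : (((x) - 4800) / 5))

/* Assumed CTL edge coding: low 6 bits = power limit, bit 6 = edge flag */
#define CTL(tpower, flag) ((tpower) | ((flag) << 6))

int main(void)
{
	printf("FREQ2FBIN(2412, 1) = %d\n", FREQ2FBIN(2412, 1));	/* 112 */
	printf("FREQ2FBIN(5180, 0) = %d\n", FREQ2FBIN(5180, 0));	/* 76 */
	printf("CTL(60, 1) = 0x%02x\n", CTL(60, 1));			/* 0x7c */
	return 0;
}

Under these assumptions, an entry such as FREQ2FBIN(2412, 1) in calFreqPier2G fits in a single byte, which is why the tables can afford one byte per calibration pier.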
1219
1220
1221static const struct ar9300_eeprom ar9300_h112 = {
1222 .eepromVersion = 2,
1223 .templateVersion = 3,
1224 .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
1225 .custData = {"h112-241-f0000"},
1226 .baseEepHeader = {
1227 .regDmn = { LE16(0), LE16(0x1f) },
1228 .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
1229 .opCapFlags = {
1230 .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
1231 .eepMisc = 0,
1232 },
1233 .rfSilent = 0,
1234 .blueToothOptions = 0,
1235 .deviceCap = 0,
1236 .deviceType = 5, /* takes lower byte in eeprom location */
1237 .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
1238 .params_for_tuning_caps = {0, 0},
1239 .featureEnable = 0x0d,
1240 /*
1241 * bit0 - enable tx temp comp - disabled
1242 * bit1 - enable tx volt comp - disabled
1243 * bit2 - enable fastClock - enabled
1244 * bit3 - enable doubling - enabled
1245 * bit4 - enable internal regulator - disabled
1246 * bit5 - enable pa predistortion - disabled
1247 */
1248 .miscConfiguration = 0, /* bit0 - turn down drivestrength */
1249 .eepromWriteEnableGpio = 6,
1250 .wlanDisableGpio = 0,
1251 .wlanLedGpio = 8,
1252 .rxBandSelectGpio = 0xff,
1253 .txrxgain = 0x10,
1254 .swreg = 0,
1255 },
1256 .modalHeader2G = {
1257 /* ar9300_modal_eep_header 2g */
1258 /* 4 idle,t1,t2,b(4 bits per setting) */
1259 .antCtrlCommon = LE32(0x110),
1260 /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
1261 .antCtrlCommon2 = LE32(0x44444),
1262
1263 /*
1264 * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
1265 * rx1, rx12, b (2 bits each)
1266 */
1267 .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
1268
1269 /*
1270 * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
1271 * for ar9280 (0xa20c/b20c 5:0)
1272 */
1273 .xatten1DB = {0, 0, 0},
1274
1275 /*
1276 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
1277 * for ar9280 (0xa20c/b20c 16:12
1278 */
1279 .xatten1Margin = {0, 0, 0},
1280 .tempSlope = 25,
1281 .voltSlope = 0,
1282
1283 /*
1284 * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
1285 * channels in usual fbin coding format
1286 */
1287 .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
1288
1289 /*
1290 * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
1291 * if the register is per chain
1292 */
1293 .noiseFloorThreshCh = {-1, 0, 0},
1294 .ob = {1, 1, 1},/* 3 chain */
1295 .db_stage2 = {1, 1, 1}, /* 3 chain */
1296 .db_stage3 = {0, 0, 0},
1297 .db_stage4 = {0, 0, 0},
1298 .xpaBiasLvl = 0,
1299 .txFrameToDataStart = 0x0e,
1300 .txFrameToPaOn = 0x0e,
1301 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
1302 .antennaGain = 0,
1303 .switchSettling = 0x2c,
1304 .adcDesiredSize = -30,
1305 .txEndToXpaOff = 0,
1306 .txEndToRxOn = 0x2,
1307 .txFrameToXpaOn = 0xe,
1308 .thresh62 = 28,
1309 .papdRateMaskHt20 = LE32(0x80c080),
1310 .papdRateMaskHt40 = LE32(0x80c080),
1311 .futureModal = {
1312 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1313 },
1314 },
1315 .base_ext1 = {
1316 .ant_div_control = 0,
1317 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
1318 },
1319 .calFreqPier2G = {
1320 FREQ2FBIN(2412, 1),
1321 FREQ2FBIN(2437, 1),
1322 FREQ2FBIN(2472, 1),
1323 },
1324 /* ar9300_cal_data_per_freq_op_loop 2g */
1325 .calPierData2G = {
1326 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1327 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1328 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1329 },
1330 .calTarget_freqbin_Cck = {
1331 FREQ2FBIN(2412, 1),
1332 FREQ2FBIN(2484, 1),
1333 },
1334 .calTarget_freqbin_2G = {
1335 FREQ2FBIN(2412, 1),
1336 FREQ2FBIN(2437, 1),
1337 FREQ2FBIN(2472, 1)
1338 },
1339 .calTarget_freqbin_2GHT20 = {
1340 FREQ2FBIN(2412, 1),
1341 FREQ2FBIN(2437, 1),
1342 FREQ2FBIN(2472, 1)
1343 },
1344 .calTarget_freqbin_2GHT40 = {
1345 FREQ2FBIN(2412, 1),
1346 FREQ2FBIN(2437, 1),
1347 FREQ2FBIN(2472, 1)
1348 },
1349 .calTargetPowerCck = {
1350 /* 1L-5L,5S,11L,11S */
1351 { {34, 34, 34, 34} },
1352 { {34, 34, 34, 34} },
1353 },
1354 .calTargetPower2G = {
1355 /* 6-24,36,48,54 */
1356 { {34, 34, 32, 32} },
1357 { {34, 34, 32, 32} },
1358 { {34, 34, 32, 32} },
1359 },
1360 .calTargetPower2GHT20 = {
1361 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
1362 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
1363 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
1364 },
1365 .calTargetPower2GHT40 = {
1366 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
1367 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
1368 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
1369 },
1370 .ctlIndex_2G = {
1371 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
1372 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
1373 },
1374 .ctl_freqbin_2G = {
1375 {
1376 FREQ2FBIN(2412, 1),
1377 FREQ2FBIN(2417, 1),
1378 FREQ2FBIN(2457, 1),
1379 FREQ2FBIN(2462, 1)
1380 },
1381 {
1382 FREQ2FBIN(2412, 1),
1383 FREQ2FBIN(2417, 1),
1384 FREQ2FBIN(2462, 1),
1385 0xFF,
1386 },
1387
1388 {
1389 FREQ2FBIN(2412, 1),
1390 FREQ2FBIN(2417, 1),
1391 FREQ2FBIN(2462, 1),
1392 0xFF,
1393 },
1394 {
1395 FREQ2FBIN(2422, 1),
1396 FREQ2FBIN(2427, 1),
1397 FREQ2FBIN(2447, 1),
1398 FREQ2FBIN(2452, 1)
1399 },
1400
1401 {
1402 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1403 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1404 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
1405 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
1406 },
1407
1408 {
1409 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1410 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1411 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
1412 0,
1413 },
1414
1415 {
1416 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1417 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1418 FREQ2FBIN(2472, 1),
1419 0,
1420 },
1421
1422 {
1423 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
1424 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
1425 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
1426 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
1427 },
1428
1429 {
1430 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1431 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1432 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
1433 },
1434
1435 {
1436 /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1437 /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1438 /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
1439 0
1440 },
1441
1442 {
1443 /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1444 /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1445 /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
1446 0
1447 },
1448
1449 {
1450 /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
1451 /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
1452 /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
1453 /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
1454 }
1455 },
1456 .ctlPowerData_2G = {
1457 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1458 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1459 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
1460
1461 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
1462 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1463 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1464
1465 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
1466 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1467 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1468
1469 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1470 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
1471 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
1472 },
1473 .modalHeader5G = {
1474 /* 4 idle,t1,t2,b (4 bits per setting) */
1475 .antCtrlCommon = LE32(0x220),
1476 /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
1477 .antCtrlCommon2 = LE32(0x44444),
1478 /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
1479 .antCtrlChain = {
1480 LE16(0x150), LE16(0x150), LE16(0x150),
1481 },
1482 /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
1483 .xatten1DB = {0, 0, 0},
1484
1485 /*
1486 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
1487 * for merlin (0xa20c/b20c 16:12
1488 */
1489 .xatten1Margin = {0, 0, 0},
1490 .tempSlope = 45,
1491 .voltSlope = 0,
1492 /* spurChans spur channels in usual fbin coding format */
1493 .spurChans = {0, 0, 0, 0, 0},
1494 /* noiseFloorThreshCh Check if the register is per chain */
1495 .noiseFloorThreshCh = {-1, 0, 0},
1496 .ob = {3, 3, 3}, /* 3 chain */
1497 .db_stage2 = {3, 3, 3}, /* 3 chain */
1498 .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
1499 .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
1500 .xpaBiasLvl = 0,
1501 .txFrameToDataStart = 0x0e,
1502 .txFrameToPaOn = 0x0e,
1503 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
1504 .antennaGain = 0,
1505 .switchSettling = 0x2d,
1506 .adcDesiredSize = -30,
1507 .txEndToXpaOff = 0,
1508 .txEndToRxOn = 0x2,
1509 .txFrameToXpaOn = 0xe,
1510 .thresh62 = 28,
1511 .papdRateMaskHt20 = LE32(0x0cf0e0e0),
1512 .papdRateMaskHt40 = LE32(0x6cf0e0e0),
1513 .futureModal = {
1514 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1515 },
1516 },
1517 .base_ext2 = {
1518 .tempSlopeLow = 40,
1519 .tempSlopeHigh = 50,
1520 .xatten1DBLow = {0, 0, 0},
1521 .xatten1MarginLow = {0, 0, 0},
1522 .xatten1DBHigh = {0, 0, 0},
1523 .xatten1MarginHigh = {0, 0, 0}
1524 },
1525 .calFreqPier5G = {
1526 FREQ2FBIN(5180, 0),
1527 FREQ2FBIN(5220, 0),
1528 FREQ2FBIN(5320, 0),
1529 FREQ2FBIN(5400, 0),
1530 FREQ2FBIN(5500, 0),
1531 FREQ2FBIN(5600, 0),
1532 FREQ2FBIN(5700, 0),
1533 FREQ2FBIN(5825, 0)
1534 },
1535 .calPierData5G = {
1536 {
1537 {0, 0, 0, 0, 0},
1538 {0, 0, 0, 0, 0},
1539 {0, 0, 0, 0, 0},
1540 {0, 0, 0, 0, 0},
1541 {0, 0, 0, 0, 0},
1542 {0, 0, 0, 0, 0},
1543 {0, 0, 0, 0, 0},
1544 {0, 0, 0, 0, 0},
1545 },
1546 {
1547 {0, 0, 0, 0, 0},
1548 {0, 0, 0, 0, 0},
1549 {0, 0, 0, 0, 0},
1550 {0, 0, 0, 0, 0},
1551 {0, 0, 0, 0, 0},
1552 {0, 0, 0, 0, 0},
1553 {0, 0, 0, 0, 0},
1554 {0, 0, 0, 0, 0},
1555 },
1556 {
1557 {0, 0, 0, 0, 0},
1558 {0, 0, 0, 0, 0},
1559 {0, 0, 0, 0, 0},
1560 {0, 0, 0, 0, 0},
1561 {0, 0, 0, 0, 0},
1562 {0, 0, 0, 0, 0},
1563 {0, 0, 0, 0, 0},
1564 {0, 0, 0, 0, 0},
1565 },
1566
1567 },
1568 .calTarget_freqbin_5G = {
1569 FREQ2FBIN(5180, 0),
1570 FREQ2FBIN(5240, 0),
1571 FREQ2FBIN(5320, 0),
1572 FREQ2FBIN(5400, 0),
1573 FREQ2FBIN(5500, 0),
1574 FREQ2FBIN(5600, 0),
1575 FREQ2FBIN(5700, 0),
1576 FREQ2FBIN(5825, 0)
1577 },
1578 .calTarget_freqbin_5GHT20 = {
1579 FREQ2FBIN(5180, 0),
1580 FREQ2FBIN(5240, 0),
1581 FREQ2FBIN(5320, 0),
1582 FREQ2FBIN(5400, 0),
1583 FREQ2FBIN(5500, 0),
1584 FREQ2FBIN(5700, 0),
1585 FREQ2FBIN(5745, 0),
1586 FREQ2FBIN(5825, 0)
1587 },
1588 .calTarget_freqbin_5GHT40 = {
1589 FREQ2FBIN(5180, 0),
1590 FREQ2FBIN(5240, 0),
1591 FREQ2FBIN(5320, 0),
1592 FREQ2FBIN(5400, 0),
1593 FREQ2FBIN(5500, 0),
1594 FREQ2FBIN(5700, 0),
1595 FREQ2FBIN(5745, 0),
1596 FREQ2FBIN(5825, 0)
1597 },
1598 .calTargetPower5G = {
1599 /* 6-24,36,48,54 */
1600 { {30, 30, 28, 24} },
1601 { {30, 30, 28, 24} },
1602 { {30, 30, 28, 24} },
1603 { {30, 30, 28, 24} },
1604 { {30, 30, 28, 24} },
1605 { {30, 30, 28, 24} },
1606 { {30, 30, 28, 24} },
1607 { {30, 30, 28, 24} },
1608 },
1609 .calTargetPower5GHT20 = {
1610 /*
1611 * 0_8_16,1-3_9-11_17-19,
1612 * 4,5,6,7,12,13,14,15,20,21,22,23
1613 */
1614 { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} },
1615 { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} },
1616 { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} },
1617 { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} },
1618 { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} },
1619 { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} },
1620 { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} },
1621 { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} },
1622 },
1623 .calTargetPower5GHT40 = {
1624 /*
1625 * 0_8_16,1-3_9-11_17-19,
1626 * 4,5,6,7,12,13,14,15,20,21,22,23
1627 */
1628 { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} },
1629 { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} },
1630 { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} },
1631 { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} },
1632 { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} },
1633 { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} },
1634 { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} },
1635 { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} },
1636 },
1637 .ctlIndex_5G = {
1638 0x10, 0x16, 0x18, 0x40, 0x46,
1639 0x48, 0x30, 0x36, 0x38
1640 },
1641 .ctl_freqbin_5G = {
1642 {
1643 /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1644 /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1645 /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
1646 /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1647 /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
1648 /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1649 /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1650 /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1651 },
1652 {
1653 /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1654 /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1655 /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
1656 /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1657 /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
1658 /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1659 /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1660 /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1661 },
1662
1663 {
1664 /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1665 /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
1666 /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
1667 /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
1668 /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
1669 /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
1670 /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
1671 /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
1672 },
1673
1674 {
1675 /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1676 /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
1677 /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
1678 /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
1679 /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
1680 /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1681 /* Data[3].ctlEdges[6].bChannel */ 0xFF,
1682 /* Data[3].ctlEdges[7].bChannel */ 0xFF,
1683 },
1684
1685 {
1686 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1687 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1688 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
1689 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
1690 /* Data[4].ctlEdges[4].bChannel */ 0xFF,
1691 /* Data[4].ctlEdges[5].bChannel */ 0xFF,
1692 /* Data[4].ctlEdges[6].bChannel */ 0xFF,
1693 /* Data[4].ctlEdges[7].bChannel */ 0xFF,
1694 },
1695
1696 {
1697 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1698 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
1699 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
1700 /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
1701 /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
1702 /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
1703 /* Data[5].ctlEdges[6].bChannel */ 0xFF,
1704 /* Data[5].ctlEdges[7].bChannel */ 0xFF
1705 },
1706
1707 {
1708 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1709 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
1710 /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
1711 /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
1712 /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
1713 /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
1714 /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
1715 /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
1716 },
1717
1718 {
1719 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1720 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1721 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
1722 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1723 /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
1724 /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1725 /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1726 /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1727 },
1728
1729 {
1730 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1731 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
1732 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
1733 /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
1734 /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
1735 /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
1736 /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
1737 /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
1738 }
1739 },
1740 .ctlPowerData_5G = {
1741 {
1742 {
1743 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1744 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1745 }
1746 },
1747 {
1748 {
1749 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1750 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1751 }
1752 },
1753 {
1754 {
1755 CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1756 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1757 }
1758 },
1759 {
1760 {
1761 CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1762 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1763 }
1764 },
1765 {
1766 {
1767 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1768 CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1769 }
1770 },
1771 {
1772 {
1773 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1774 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1775 }
1776 },
1777 {
1778 {
1779 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1780 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1781 }
1782 },
1783 {
1784 {
1785 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1786 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1787 }
1788 },
1789 {
1790 {
1791 CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
1792 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1793 }
1794 },
1795 }
1796 };
1797
1798
1799static const struct ar9300_eeprom ar9300_x112 = {
1800 .eepromVersion = 2,
1801 .templateVersion = 5,
1802 .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
1803 .custData = {"x112-041-f0000"},
1804 .baseEepHeader = {
1805 .regDmn = { LE16(0), LE16(0x1f) },
1806 .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
1807 .opCapFlags = {
1808 .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
1809 .eepMisc = 0,
1810 },
1811 .rfSilent = 0,
1812 .blueToothOptions = 0,
1813 .deviceCap = 0,
1814 .deviceType = 5, /* takes lower byte in eeprom location */
1815 .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
1816 .params_for_tuning_caps = {0, 0},
1817 .featureEnable = 0x0d,
1818 /*
1819 * bit0 - enable tx temp comp - disabled
1820 * bit1 - enable tx volt comp - disabled
1821 * bit2 - enable fastclock - enabled
1822 * bit3 - enable doubling - enabled
1823 * bit4 - enable internal regulator - disabled
1824 * bit5 - enable pa predistortion - disabled
1825 */
1826 .miscConfiguration = 0, /* bit0 - turn down drivestrength */
1827 .eepromWriteEnableGpio = 6,
1828 .wlanDisableGpio = 0,
1829 .wlanLedGpio = 8,
1830 .rxBandSelectGpio = 0xff,
1831 .txrxgain = 0x0,
1832 .swreg = 0,
1833 },
1834 .modalHeader2G = {
1835 /* ar9300_modal_eep_header 2g */
1836 /* 4 idle,t1,t2,b(4 bits per setting) */
1837 .antCtrlCommon = LE32(0x110),
1838 /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
1839 .antCtrlCommon2 = LE32(0x22222),
1840
1841 /*
1842 * antCtrlChain[ar9300_max_chains]; 6 idle, t, r,
1843 * rx1, rx12, b (2 bits each)
1844 */
1845 .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) },
1846
1847 /*
1848 * xatten1DB[AR9300_max_chains]; 3 xatten1_db
1849 * for ar9280 (0xa20c/b20c 5:0)
1850 */
1851 .xatten1DB = {0x1b, 0x1b, 0x1b},
1852
1853 /*
1854 * xatten1Margin[ar9300_max_chains]; 3 xatten1_margin
1855 * for ar9280 (0xa20c/b20c 16:12
1856 */
1857 .xatten1Margin = {0x15, 0x15, 0x15},
1858 .tempSlope = 50,
1859 .voltSlope = 0,
1860
1861 /*
1862 * spurChans[OSPrey_eeprom_modal_sPURS]; spur
1863 * channels in usual fbin coding format
1864 */
1865 .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
1866
1867 /*
1868 * noiseFloorThreshch[ar9300_max_cHAINS]; 3 Check
1869 * if the register is per chain
1870 */
1871 .noiseFloorThreshCh = {-1, 0, 0},
1872 .ob = {1, 1, 1},/* 3 chain */
1873 .db_stage2 = {1, 1, 1}, /* 3 chain */
1874 .db_stage3 = {0, 0, 0},
1875 .db_stage4 = {0, 0, 0},
1876 .xpaBiasLvl = 0,
1877 .txFrameToDataStart = 0x0e,
1878 .txFrameToPaOn = 0x0e,
1879 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
1880 .antennaGain = 0,
1881 .switchSettling = 0x2c,
1882 .adcDesiredSize = -30,
1883 .txEndToXpaOff = 0,
1884 .txEndToRxOn = 0x2,
1885 .txFrameToXpaOn = 0xe,
1886 .thresh62 = 28,
1887 .papdRateMaskHt20 = LE32(0x0c80c080),
1888 .papdRateMaskHt40 = LE32(0x0080c080),
1889 .futureModal = {
1890 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1891 },
1892 },
1893 .base_ext1 = {
1894 .ant_div_control = 0,
1895 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
1896 },
1897 .calFreqPier2G = {
1898 FREQ2FBIN(2412, 1),
1899 FREQ2FBIN(2437, 1),
1900 FREQ2FBIN(2472, 1),
1901 },
1902 /* ar9300_cal_data_per_freq_op_loop 2g */
1903 .calPierData2G = {
1904 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1905 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1906 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1907 },
1908 .calTarget_freqbin_Cck = {
1909 FREQ2FBIN(2412, 1),
1910 FREQ2FBIN(2472, 1),
1911 },
1912 .calTarget_freqbin_2G = {
1913 FREQ2FBIN(2412, 1),
1914 FREQ2FBIN(2437, 1),
1915 FREQ2FBIN(2472, 1)
1916 },
1917 .calTarget_freqbin_2GHT20 = {
1918 FREQ2FBIN(2412, 1),
1919 FREQ2FBIN(2437, 1),
1920 FREQ2FBIN(2472, 1)
1921 },
1922 .calTarget_freqbin_2GHT40 = {
1923 FREQ2FBIN(2412, 1),
1924 FREQ2FBIN(2437, 1),
1925 FREQ2FBIN(2472, 1)
1926 },
1927 .calTargetPowerCck = {
1928 /* 1L-5L,5S,11L,11s */
1929 { {38, 38, 38, 38} },
1930 { {38, 38, 38, 38} },
1931 },
1932 .calTargetPower2G = {
1933 /* 6-24,36,48,54 */
1934 { {38, 38, 36, 34} },
1935 { {38, 38, 36, 34} },
1936 { {38, 38, 34, 32} },
1937 },
1938 .calTargetPower2GHT20 = {
1939 { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} },
1940 { {36, 36, 36, 36, 36, 34, 36, 34, 32, 30, 30, 30, 28, 26} },
1941 { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} },
1942 },
1943 .calTargetPower2GHT40 = {
1944 { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} },
1945 { {36, 36, 36, 36, 34, 32, 34, 32, 30, 28, 28, 28, 28, 24} },
1946 { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} },
1947 },
1948 .ctlIndex_2G = {
1949 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
1950 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
1951 },
1952 .ctl_freqbin_2G = {
1953 {
1954 FREQ2FBIN(2412, 1),
1955 FREQ2FBIN(2417, 1),
1956 FREQ2FBIN(2457, 1),
1957 FREQ2FBIN(2462, 1)
1958 },
1959 {
1960 FREQ2FBIN(2412, 1),
1961 FREQ2FBIN(2417, 1),
1962 FREQ2FBIN(2462, 1),
1963 0xFF,
1964 },
1965
1966 {
1967 FREQ2FBIN(2412, 1),
1968 FREQ2FBIN(2417, 1),
1969 FREQ2FBIN(2462, 1),
1970 0xFF,
1971 },
1972 {
1973 FREQ2FBIN(2422, 1),
1974 FREQ2FBIN(2427, 1),
1975 FREQ2FBIN(2447, 1),
1976 FREQ2FBIN(2452, 1)
1977 },
1978
1979 {
1980 /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
1981 /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
1982 /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
1983 /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(2484, 1),
1984 },
1985
1986 {
1987 /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
1988 /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
1989 /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
1990 0,
1991 },
1992
1993 {
1994 /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
1995 /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
1996 FREQ2FBIN(2472, 1),
1997 0,
1998 },
1999
2000 {
2001 /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(2422, 1),
2002 /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(2427, 1),
2003 /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(2447, 1),
2004 /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(2462, 1),
2005 },
2006
2007 {
2008 /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
2009 /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
2010 /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
2011 },
2012
2013 {
2014 /* Data[9].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
2015 /* Data[9].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
2016 /* Data[9].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
2017 0
2018 },
2019
2020 {
2021 /* Data[10].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
2022 /* Data[10].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
2023 /* Data[10].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
2024 0
2025 },
2026
2027 {
2028 /* Data[11].ctledges[0].bchannel */ FREQ2FBIN(2422, 1),
2029 /* Data[11].ctledges[1].bchannel */ FREQ2FBIN(2427, 1),
2030 /* Data[11].ctledges[2].bchannel */ FREQ2FBIN(2447, 1),
2031 /* Data[11].ctledges[3].bchannel */ FREQ2FBIN(2462, 1),
2032 }
2033 },
2034 .ctlPowerData_2G = {
2035 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2036 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2037 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
2038
2039 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
2040 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2041 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2042
2043 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
2044 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2045 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2046
2047 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2048 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
2049 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
2050 },
2051 .modalHeader5G = {
2052 /* 4 idle,t1,t2,b (4 bits per setting) */
2053 .antCtrlCommon = LE32(0x110),
2054 /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
2055 .antCtrlCommon2 = LE32(0x22222),
2056 /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
2057 .antCtrlChain = {
2058 LE16(0x0), LE16(0x0), LE16(0x0),
2059 },
2060 /* xatten1DB 3 xatten1_db for ar9280 (0xa20c/b20c 5:0) */
2061 .xatten1DB = {0x13, 0x19, 0x17},
2062
2063 /*
2064 * xatten1Margin[ar9300_max_chains]; 3 xatten1_margin
2065 * for merlin (0xa20c/b20c 16:12
2066 */
2067 .xatten1Margin = {0x19, 0x19, 0x19},
2068 .tempSlope = 70,
2069 .voltSlope = 15,
2070 /* spurChans spur channels in usual fbin coding format */
2071 .spurChans = {0, 0, 0, 0, 0},
2072 /* noiseFloorThreshch check if the register is per chain */
2073 .noiseFloorThreshCh = {-1, 0, 0},
2074 .ob = {3, 3, 3}, /* 3 chain */
2075 .db_stage2 = {3, 3, 3}, /* 3 chain */
2076 .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
2077 .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
2078 .xpaBiasLvl = 0,
2079 .txFrameToDataStart = 0x0e,
2080 .txFrameToPaOn = 0x0e,
2081 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
2082 .antennaGain = 0,
2083 .switchSettling = 0x2d,
2084 .adcDesiredSize = -30,
2085 .txEndToXpaOff = 0,
2086 .txEndToRxOn = 0x2,
2087 .txFrameToXpaOn = 0xe,
2088 .thresh62 = 28,
2089 .papdRateMaskHt20 = LE32(0x0cf0e0e0),
2090 .papdRateMaskHt40 = LE32(0x6cf0e0e0),
2091 .futureModal = {
2092 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2093 },
2094 },
2095 .base_ext2 = {
2096 .tempSlopeLow = 72,
2097 .tempSlopeHigh = 105,
2098 .xatten1DBLow = {0x10, 0x14, 0x10},
2099 .xatten1MarginLow = {0x19, 0x19 , 0x19},
2100 .xatten1DBHigh = {0x1d, 0x20, 0x24},
2101 .xatten1MarginHigh = {0x10, 0x10, 0x10}
2102 },
2103 .calFreqPier5G = {
2104 FREQ2FBIN(5180, 0),
2105 FREQ2FBIN(5220, 0),
2106 FREQ2FBIN(5320, 0),
2107 FREQ2FBIN(5400, 0),
2108 FREQ2FBIN(5500, 0),
2109 FREQ2FBIN(5600, 0),
2110 FREQ2FBIN(5700, 0),
2111 FREQ2FBIN(5785, 0)
2112 },
2113 .calPierData5G = {
2114 {
2115 {0, 0, 0, 0, 0},
2116 {0, 0, 0, 0, 0},
2117 {0, 0, 0, 0, 0},
2118 {0, 0, 0, 0, 0},
2119 {0, 0, 0, 0, 0},
2120 {0, 0, 0, 0, 0},
2121 {0, 0, 0, 0, 0},
2122 {0, 0, 0, 0, 0},
2123 },
2124 {
2125 {0, 0, 0, 0, 0},
2126 {0, 0, 0, 0, 0},
2127 {0, 0, 0, 0, 0},
2128 {0, 0, 0, 0, 0},
2129 {0, 0, 0, 0, 0},
2130 {0, 0, 0, 0, 0},
2131 {0, 0, 0, 0, 0},
2132 {0, 0, 0, 0, 0},
2133 },
2134 {
2135 {0, 0, 0, 0, 0},
2136 {0, 0, 0, 0, 0},
2137 {0, 0, 0, 0, 0},
2138 {0, 0, 0, 0, 0},
2139 {0, 0, 0, 0, 0},
2140 {0, 0, 0, 0, 0},
2141 {0, 0, 0, 0, 0},
2142 {0, 0, 0, 0, 0},
2143 },
2144
2145 },
2146 .calTarget_freqbin_5G = {
2147 FREQ2FBIN(5180, 0),
2148 FREQ2FBIN(5220, 0),
2149 FREQ2FBIN(5320, 0),
2150 FREQ2FBIN(5400, 0),
2151 FREQ2FBIN(5500, 0),
2152 FREQ2FBIN(5600, 0),
2153 FREQ2FBIN(5725, 0),
2154 FREQ2FBIN(5825, 0)
2155 },
2156 .calTarget_freqbin_5GHT20 = {
2157 FREQ2FBIN(5180, 0),
2158 FREQ2FBIN(5220, 0),
2159 FREQ2FBIN(5320, 0),
2160 FREQ2FBIN(5400, 0),
2161 FREQ2FBIN(5500, 0),
2162 FREQ2FBIN(5600, 0),
2163 FREQ2FBIN(5725, 0),
2164 FREQ2FBIN(5825, 0)
2165 },
2166 .calTarget_freqbin_5GHT40 = {
2167 FREQ2FBIN(5180, 0),
2168 FREQ2FBIN(5220, 0),
2169 FREQ2FBIN(5320, 0),
2170 FREQ2FBIN(5400, 0),
2171 FREQ2FBIN(5500, 0),
2172 FREQ2FBIN(5600, 0),
2173 FREQ2FBIN(5725, 0),
2174 FREQ2FBIN(5825, 0)
2175 },
2176 .calTargetPower5G = {
2177 /* 6-24,36,48,54 */
2178 { {32, 32, 28, 26} },
2179 { {32, 32, 28, 26} },
2180 { {32, 32, 28, 26} },
2181 { {32, 32, 26, 24} },
2182 { {32, 32, 26, 24} },
2183 { {32, 32, 24, 22} },
2184 { {30, 30, 24, 22} },
2185 { {30, 30, 24, 22} },
2186 },
2187 .calTargetPower5GHT20 = {
2188 /*
2189 * 0_8_16,1-3_9-11_17-19,
2190 * 4,5,6,7,12,13,14,15,20,21,22,23
2191 */
2192 { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
2193 { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
2194 { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
2195 { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 22, 22, 20, 20} },
2196 { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 20, 18, 16, 16} },
2197 { {32, 32, 32, 32, 28, 26, 32, 24, 20, 16, 18, 16, 14, 14} },
2198 { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} },
2199 { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} },
2200 },
2201 .calTargetPower5GHT40 = {
2202 /*
2203 * 0_8_16,1-3_9-11_17-19,
2204 * 4,5,6,7,12,13,14,15,20,21,22,23
2205 */
2206 { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
2207 { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
2208 { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
2209 { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 22, 22, 20, 20} },
2210 { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 20, 18, 16, 16} },
2211 { {32, 32, 32, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
2212 { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
2213 { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
2214 },
2215 .ctlIndex_5G = {
2216 0x10, 0x16, 0x18, 0x40, 0x46,
2217 0x48, 0x30, 0x36, 0x38
2218 },
2219 .ctl_freqbin_5G = {
2220 {
2221 /* Data[0].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2222 /* Data[0].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
2223 /* Data[0].ctledges[2].bchannel */ FREQ2FBIN(5280, 0),
2224 /* Data[0].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
2225 /* Data[0].ctledges[4].bchannel */ FREQ2FBIN(5600, 0),
2226 /* Data[0].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
2227 /* Data[0].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
2228 /* Data[0].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
2229 },
2230 {
2231 /* Data[1].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2232 /* Data[1].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
2233 /* Data[1].ctledges[2].bchannel */ FREQ2FBIN(5280, 0),
2234 /* Data[1].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
2235 /* Data[1].ctledges[4].bchannel */ FREQ2FBIN(5520, 0),
2236 /* Data[1].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
2237 /* Data[1].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
2238 /* Data[1].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
2239 },
2240
2241 {
2242 /* Data[2].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
2243 /* Data[2].ctledges[1].bchannel */ FREQ2FBIN(5230, 0),
2244 /* Data[2].ctledges[2].bchannel */ FREQ2FBIN(5270, 0),
2245 /* Data[2].ctledges[3].bchannel */ FREQ2FBIN(5310, 0),
2246 /* Data[2].ctledges[4].bchannel */ FREQ2FBIN(5510, 0),
2247 /* Data[2].ctledges[5].bchannel */ FREQ2FBIN(5550, 0),
2248 /* Data[2].ctledges[6].bchannel */ FREQ2FBIN(5670, 0),
2249 /* Data[2].ctledges[7].bchannel */ FREQ2FBIN(5755, 0)
2250 },
2251
2252 {
2253 /* Data[3].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2254 /* Data[3].ctledges[1].bchannel */ FREQ2FBIN(5200, 0),
2255 /* Data[3].ctledges[2].bchannel */ FREQ2FBIN(5260, 0),
2256 /* Data[3].ctledges[3].bchannel */ FREQ2FBIN(5320, 0),
2257 /* Data[3].ctledges[4].bchannel */ FREQ2FBIN(5500, 0),
2258 /* Data[3].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
2259 /* Data[3].ctledges[6].bchannel */ 0xFF,
2260 /* Data[3].ctledges[7].bchannel */ 0xFF,
2261 },
2262
2263 {
2264 /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2265 /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
2266 /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(5500, 0),
2267 /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(5700, 0),
2268 /* Data[4].ctledges[4].bchannel */ 0xFF,
2269 /* Data[4].ctledges[5].bchannel */ 0xFF,
2270 /* Data[4].ctledges[6].bchannel */ 0xFF,
2271 /* Data[4].ctledges[7].bchannel */ 0xFF,
2272 },
2273
2274 {
2275 /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
2276 /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(5270, 0),
2277 /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(5310, 0),
2278 /* Data[5].ctledges[3].bchannel */ FREQ2FBIN(5510, 0),
2279 /* Data[5].ctledges[4].bchannel */ FREQ2FBIN(5590, 0),
2280 /* Data[5].ctledges[5].bchannel */ FREQ2FBIN(5670, 0),
2281 /* Data[5].ctledges[6].bchannel */ 0xFF,
2282 /* Data[5].ctledges[7].bchannel */ 0xFF
2283 },
2284
2285 {
2286 /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2287 /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(5200, 0),
2288 /* Data[6].ctledges[2].bchannel */ FREQ2FBIN(5220, 0),
2289 /* Data[6].ctledges[3].bchannel */ FREQ2FBIN(5260, 0),
2290 /* Data[6].ctledges[4].bchannel */ FREQ2FBIN(5500, 0),
2291 /* Data[6].ctledges[5].bchannel */ FREQ2FBIN(5600, 0),
2292 /* Data[6].ctledges[6].bchannel */ FREQ2FBIN(5700, 0),
2293 /* Data[6].ctledges[7].bchannel */ FREQ2FBIN(5745, 0)
2294 },
2295
2296 {
2297 /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2298 /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
2299 /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(5320, 0),
2300 /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
2301 /* Data[7].ctledges[4].bchannel */ FREQ2FBIN(5560, 0),
2302 /* Data[7].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
2303 /* Data[7].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
2304 /* Data[7].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
2305 },
2306
2307 {
2308 /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
2309 /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(5230, 0),
2310 /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(5270, 0),
2311 /* Data[8].ctledges[3].bchannel */ FREQ2FBIN(5510, 0),
2312 /* Data[8].ctledges[4].bchannel */ FREQ2FBIN(5550, 0),
2313 /* Data[8].ctledges[5].bchannel */ FREQ2FBIN(5670, 0),
2314 /* Data[8].ctledges[6].bchannel */ FREQ2FBIN(5755, 0),
2315 /* Data[8].ctledges[7].bchannel */ FREQ2FBIN(5795, 0)
2316 }
2317 },
2318 .ctlPowerData_5G = {
2319 {
2320 {
2321 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2322 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2323 }
2324 },
2325 {
2326 {
2327 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2328 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2329 }
2330 },
2331 {
2332 {
2333 CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2334 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2335 }
2336 },
2337 {
2338 {
2339 CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2340 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2341 }
2342 },
2343 {
2344 {
2345 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2346 CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2347 }
2348 },
2349 {
2350 {
2351 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2352 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2353 }
2354 },
2355 {
2356 {
2357 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2358 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2359 }
2360 },
2361 {
2362 {
2363 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2364 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2365 }
2366 },
2367 {
2368 {
2369 CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
2370 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2371 }
2372 },
2373 }
2374 };
2375
2376static const struct ar9300_eeprom ar9300_h116 = {
2377 .eepromVersion = 2,
2378 .templateVersion = 4,
2379 .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
2380 .custData = {"h116-041-f0000"},
2381 .baseEepHeader = {
2382 .regDmn = { LE16(0), LE16(0x1f) },
2383 .txrxMask = 0x33, /* 4 bits tx and 4 bits rx */
2384 .opCapFlags = {
2385 .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
2386 .eepMisc = 0,
2387 },
2388 .rfSilent = 0,
2389 .blueToothOptions = 0,
2390 .deviceCap = 0,
2391 .deviceType = 5, /* takes lower byte in eeprom location */
2392 .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
2393 .params_for_tuning_caps = {0, 0},
2394 .featureEnable = 0x0d,
2395 /*
2396 * bit0 - enable tx temp comp - disabled
2397 * bit1 - enable tx volt comp - disabled
2398 * bit2 - enable fastClock - enabled
2399 * bit3 - enable doubling - enabled
2400 * bit4 - enable internal regulator - disabled
2401 * bit5 - enable pa predistortion - disabled
2402 */
2403 .miscConfiguration = 0, /* bit0 - turn down drivestrength */
2404 .eepromWriteEnableGpio = 6,
2405 .wlanDisableGpio = 0,
2406 .wlanLedGpio = 8,
2407 .rxBandSelectGpio = 0xff,
2408 .txrxgain = 0x10,
2409 .swreg = 0,
2410 },
2411 .modalHeader2G = {
2412 /* ar9300_modal_eep_header 2g */
2413 /* 4 idle,t1,t2,b(4 bits per setting) */
2414 .antCtrlCommon = LE32(0x110),
2415 /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
2416 .antCtrlCommon2 = LE32(0x44444),
2417
2418 /*
2419 * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
2420 * rx1, rx12, b (2 bits each)
2421 */
2422 .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) },
2423
2424 /*
2425 * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
2426 * for ar9280 (0xa20c/b20c 5:0)
2427 */
2428 .xatten1DB = {0x1f, 0x1f, 0x1f},
2429
2430 /*
2431 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
2432 * for ar9280 (0xa20c/b20c 16:12
2433 */
2434 .xatten1Margin = {0x12, 0x12, 0x12},
2435 .tempSlope = 25,
2436 .voltSlope = 0,
2437
2438 /*
2439 * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
2440 * channels in usual fbin coding format
2441 */
2442 .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
2443
2444 /*
2445 * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
2446 * if the register is per chain
2447 */
2448 .noiseFloorThreshCh = {-1, 0, 0},
2449 .ob = {1, 1, 1},/* 3 chain */
2450 .db_stage2 = {1, 1, 1}, /* 3 chain */
2451 .db_stage3 = {0, 0, 0},
2452 .db_stage4 = {0, 0, 0},
2453 .xpaBiasLvl = 0,
2454 .txFrameToDataStart = 0x0e,
2455 .txFrameToPaOn = 0x0e,
2456 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
2457 .antennaGain = 0,
2458 .switchSettling = 0x2c,
2459 .adcDesiredSize = -30,
2460 .txEndToXpaOff = 0,
2461 .txEndToRxOn = 0x2,
2462 .txFrameToXpaOn = 0xe,
2463 .thresh62 = 28,
2464 .papdRateMaskHt20 = LE32(0x0c80C080),
2465 .papdRateMaskHt40 = LE32(0x0080C080),
2466 .futureModal = {
2467 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2468 },
2469 },
2470 .base_ext1 = {
2471 .ant_div_control = 0,
2472 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
2473 },
2474 .calFreqPier2G = {
2475 FREQ2FBIN(2412, 1),
2476 FREQ2FBIN(2437, 1),
2477 FREQ2FBIN(2472, 1),
2478 },
2479 /* ar9300_cal_data_per_freq_op_loop 2g */
2480 .calPierData2G = {
2481 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
2482 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
2483 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
2484 },
2485 .calTarget_freqbin_Cck = {
2486 FREQ2FBIN(2412, 1),
2487 FREQ2FBIN(2472, 1),
2488 },
2489 .calTarget_freqbin_2G = {
2490 FREQ2FBIN(2412, 1),
2491 FREQ2FBIN(2437, 1),
2492 FREQ2FBIN(2472, 1)
2493 },
2494 .calTarget_freqbin_2GHT20 = {
2495 FREQ2FBIN(2412, 1),
2496 FREQ2FBIN(2437, 1),
2497 FREQ2FBIN(2472, 1)
2498 },
2499 .calTarget_freqbin_2GHT40 = {
2500 FREQ2FBIN(2412, 1),
2501 FREQ2FBIN(2437, 1),
2502 FREQ2FBIN(2472, 1)
2503 },
2504 .calTargetPowerCck = {
2505 /* 1L-5L,5S,11L,11S */
2506 { {34, 34, 34, 34} },
2507 { {34, 34, 34, 34} },
2508 },
2509 .calTargetPower2G = {
2510 /* 6-24,36,48,54 */
2511 { {34, 34, 32, 32} },
2512 { {34, 34, 32, 32} },
2513 { {34, 34, 32, 32} },
2514 },
2515 .calTargetPower2GHT20 = {
2516 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
2517 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
2518 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
2519 },
2520 .calTargetPower2GHT40 = {
2521 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
2522 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
2523 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
2524 },
2525 .ctlIndex_2G = {
2526 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
2527 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
2528 },
2529 .ctl_freqbin_2G = {
2530 {
2531 FREQ2FBIN(2412, 1),
2532 FREQ2FBIN(2417, 1),
2533 FREQ2FBIN(2457, 1),
2534 FREQ2FBIN(2462, 1)
2535 },
2536 {
2537 FREQ2FBIN(2412, 1),
2538 FREQ2FBIN(2417, 1),
2539 FREQ2FBIN(2462, 1),
2540 0xFF,
2541 },
2542
2543 {
2544 FREQ2FBIN(2412, 1),
2545 FREQ2FBIN(2417, 1),
2546 FREQ2FBIN(2462, 1),
2547 0xFF,
2548 },
2549 {
2550 FREQ2FBIN(2422, 1),
2551 FREQ2FBIN(2427, 1),
2552 FREQ2FBIN(2447, 1),
2553 FREQ2FBIN(2452, 1)
2554 },
2555
2556 {
2557 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2558 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2559 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
2560 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
2561 },
2562
2563 {
2564 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2565 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2566 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
2567 0,
2568 },
2569
2570 {
2571 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2572 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2573 FREQ2FBIN(2472, 1),
2574 0,
2575 },
2576
2577 {
2578 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
2579 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
2580 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
2581 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
2582 },
2583
2584 {
2585 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2586 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2587 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
2588 },
2589
2590 {
2591 /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2592 /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2593 /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
2594 0
2595 },
2596
2597 {
2598 /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2599 /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2600 /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
2601 0
2602 },
2603
2604 {
2605 /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
2606 /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
2607 /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
2608 /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
2609 }
2610 },
2611 .ctlPowerData_2G = {
2612 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2613 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2614 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
2615
2616 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
2617 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2618 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2619
2620 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
2621 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2622 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2623
2624 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2625 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
2626 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
2627 },
2628 .modalHeader5G = {
2629 /* 4 idle,t1,t2,b (4 bits per setting) */
2630 .antCtrlCommon = LE32(0x220),
2631 /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
2632 .antCtrlCommon2 = LE32(0x44444),
2633 /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
2634 .antCtrlChain = {
2635 LE16(0x150), LE16(0x150), LE16(0x150),
2636 },
2637 /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
2638 .xatten1DB = {0x19, 0x19, 0x19},
2639
2640 /*
2641 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
2642	 * for merlin (0xa20c/b20c 16:12)
2643 */
2644 .xatten1Margin = {0x14, 0x14, 0x14},
2645 .tempSlope = 70,
2646 .voltSlope = 0,
2647 /* spurChans spur channels in usual fbin coding format */
2648 .spurChans = {0, 0, 0, 0, 0},
2649 /* noiseFloorThreshCh Check if the register is per chain */
2650 .noiseFloorThreshCh = {-1, 0, 0},
2651 .ob = {3, 3, 3}, /* 3 chain */
2652 .db_stage2 = {3, 3, 3}, /* 3 chain */
2653 .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
2654		.db_stage4 = {3, 3, 3}, /* doesn't exist for 2G */
2655 .xpaBiasLvl = 0,
2656 .txFrameToDataStart = 0x0e,
2657 .txFrameToPaOn = 0x0e,
2658 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
2659 .antennaGain = 0,
2660 .switchSettling = 0x2d,
2661 .adcDesiredSize = -30,
2662 .txEndToXpaOff = 0,
2663 .txEndToRxOn = 0x2,
2664 .txFrameToXpaOn = 0xe,
2665 .thresh62 = 28,
2666 .papdRateMaskHt20 = LE32(0x0cf0e0e0),
2667 .papdRateMaskHt40 = LE32(0x6cf0e0e0),
2668 .futureModal = {
2669 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2670 },
2671 },
2672 .base_ext2 = {
2673 .tempSlopeLow = 35,
2674 .tempSlopeHigh = 50,
2675 .xatten1DBLow = {0, 0, 0},
2676 .xatten1MarginLow = {0, 0, 0},
2677 .xatten1DBHigh = {0, 0, 0},
2678 .xatten1MarginHigh = {0, 0, 0}
2679 },
2680 .calFreqPier5G = {
2681 FREQ2FBIN(5180, 0),
2682 FREQ2FBIN(5220, 0),
2683 FREQ2FBIN(5320, 0),
2684 FREQ2FBIN(5400, 0),
2685 FREQ2FBIN(5500, 0),
2686 FREQ2FBIN(5600, 0),
2687 FREQ2FBIN(5700, 0),
2688 FREQ2FBIN(5785, 0)
2689 },
2690 .calPierData5G = {
2691 {
2692 {0, 0, 0, 0, 0},
2693 {0, 0, 0, 0, 0},
2694 {0, 0, 0, 0, 0},
2695 {0, 0, 0, 0, 0},
2696 {0, 0, 0, 0, 0},
2697 {0, 0, 0, 0, 0},
2698 {0, 0, 0, 0, 0},
2699 {0, 0, 0, 0, 0},
2700 },
2701 {
2702 {0, 0, 0, 0, 0},
2703 {0, 0, 0, 0, 0},
2704 {0, 0, 0, 0, 0},
2705 {0, 0, 0, 0, 0},
2706 {0, 0, 0, 0, 0},
2707 {0, 0, 0, 0, 0},
2708 {0, 0, 0, 0, 0},
2709 {0, 0, 0, 0, 0},
2710 },
2711 {
2712 {0, 0, 0, 0, 0},
2713 {0, 0, 0, 0, 0},
2714 {0, 0, 0, 0, 0},
2715 {0, 0, 0, 0, 0},
2716 {0, 0, 0, 0, 0},
2717 {0, 0, 0, 0, 0},
2718 {0, 0, 0, 0, 0},
2719 {0, 0, 0, 0, 0},
2720 },
2721
2722 },
2723 .calTarget_freqbin_5G = {
2724 FREQ2FBIN(5180, 0),
2725 FREQ2FBIN(5240, 0),
2726 FREQ2FBIN(5320, 0),
2727 FREQ2FBIN(5400, 0),
2728 FREQ2FBIN(5500, 0),
2729 FREQ2FBIN(5600, 0),
2730 FREQ2FBIN(5700, 0),
2731 FREQ2FBIN(5825, 0)
2732 },
2733 .calTarget_freqbin_5GHT20 = {
2734 FREQ2FBIN(5180, 0),
2735 FREQ2FBIN(5240, 0),
2736 FREQ2FBIN(5320, 0),
2737 FREQ2FBIN(5400, 0),
2738 FREQ2FBIN(5500, 0),
2739 FREQ2FBIN(5700, 0),
2740 FREQ2FBIN(5745, 0),
2741 FREQ2FBIN(5825, 0)
2742 },
2743 .calTarget_freqbin_5GHT40 = {
2744 FREQ2FBIN(5180, 0),
2745 FREQ2FBIN(5240, 0),
2746 FREQ2FBIN(5320, 0),
2747 FREQ2FBIN(5400, 0),
2748 FREQ2FBIN(5500, 0),
2749 FREQ2FBIN(5700, 0),
2750 FREQ2FBIN(5745, 0),
2751 FREQ2FBIN(5825, 0)
2752 },
2753 .calTargetPower5G = {
2754 /* 6-24,36,48,54 */
2755 { {30, 30, 28, 24} },
2756 { {30, 30, 28, 24} },
2757 { {30, 30, 28, 24} },
2758 { {30, 30, 28, 24} },
2759 { {30, 30, 28, 24} },
2760 { {30, 30, 28, 24} },
2761 { {30, 30, 28, 24} },
2762 { {30, 30, 28, 24} },
2763 },
2764 .calTargetPower5GHT20 = {
2765 /*
2766 * 0_8_16,1-3_9-11_17-19,
2767 * 4,5,6,7,12,13,14,15,20,21,22,23
2768 */
2769 { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} },
2770 { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} },
2771 { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} },
2772 { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} },
2773 { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} },
2774 { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} },
2775 { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} },
2776 { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} },
2777 },
2778 .calTargetPower5GHT40 = {
2779 /*
2780 * 0_8_16,1-3_9-11_17-19,
2781 * 4,5,6,7,12,13,14,15,20,21,22,23
2782 */
2783 { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} },
2784 { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} },
2785 { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} },
2786 { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} },
2787 { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} },
2788 { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} },
2789 { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} },
2790 { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} },
2791 },
2792 .ctlIndex_5G = {
2793 0x10, 0x16, 0x18, 0x40, 0x46,
2794 0x48, 0x30, 0x36, 0x38
2795 },
2796 .ctl_freqbin_5G = {
2797 {
2798 /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2799 /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
2800 /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
2801 /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
2802 /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
2803 /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
2804 /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
2805 /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
2806 },
2807 {
2808 /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2809 /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
2810 /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
2811 /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
2812 /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
2813 /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
2814 /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
2815 /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
2816 },
2817
2818 {
2819 /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
2820 /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
2821 /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
2822 /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
2823 /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
2824 /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
2825 /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
2826 /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
2827 },
2828
2829 {
2830 /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2831 /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
2832 /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
2833 /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
2834 /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
2835 /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
2836 /* Data[3].ctlEdges[6].bChannel */ 0xFF,
2837 /* Data[3].ctlEdges[7].bChannel */ 0xFF,
2838 },
2839
2840 {
2841 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2842 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
2843 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
2844 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
2845 /* Data[4].ctlEdges[4].bChannel */ 0xFF,
2846 /* Data[4].ctlEdges[5].bChannel */ 0xFF,
2847 /* Data[4].ctlEdges[6].bChannel */ 0xFF,
2848 /* Data[4].ctlEdges[7].bChannel */ 0xFF,
2849 },
2850
2851 {
2852 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
2853 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
2854 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
2855 /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
2856 /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
2857 /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
2858 /* Data[5].ctlEdges[6].bChannel */ 0xFF,
2859 /* Data[5].ctlEdges[7].bChannel */ 0xFF
2860 },
2861
2862 {
2863 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2864 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
2865 /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
2866 /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
2867 /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
2868 /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
2869 /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
2870 /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
2871 },
2872
2873 {
2874 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2875 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
2876 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
2877 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
2878 /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
2879 /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
2880 /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
2881 /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
2882 },
2883
2884 {
2885 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
2886 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
2887 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
2888 /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
2889 /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
2890 /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
2891 /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
2892 /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
2893 }
2894 },
2895 .ctlPowerData_5G = {
2896 {
2897 {
2898 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2899 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2900 }
2901 },
2902 {
2903 {
2904 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2905 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2906 }
2907 },
2908 {
2909 {
2910 CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2911 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2912 }
2913 },
2914 {
2915 {
2916 CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2917 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2918 }
2919 },
2920 {
2921 {
2922 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2923 CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2924 }
2925 },
2926 {
2927 {
2928 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2929 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2930 }
2931 },
2932 {
2933 {
2934 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2935 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2936 }
2937 },
2938 {
2939 {
2940 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2941 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2942 }
2943 },
2944 {
2945 {
2946 CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
2947 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2948 }
2949 },
2950 }
2951};
2952
2953
2954static const struct ar9300_eeprom *ar9300_eep_templates[] = {
2955 &ar9300_default,
2956 &ar9300_x112,
2957 &ar9300_h116,
2958 &ar9300_h112,
2959 &ar9300_x113,
2960};
2961
2962static const struct ar9300_eeprom *ar9003_eeprom_struct_find_by_id(int id)
2963{
2964#define N_LOOP (sizeof(ar9300_eep_templates) / sizeof(ar9300_eep_templates[0]))
2965 int it;
2966
2967 for (it = 0; it < N_LOOP; it++)
2968 if (ar9300_eep_templates[it]->templateVersion == id)
2969 return ar9300_eep_templates[it];
2970 return NULL;
2971#undef N_LOOP
2972}
2973
2974
629static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) 2975static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
630{ 2976{
631 if (fbin == AR9300_BCHAN_UNUSED) 2977 if (fbin == AR9300_BCHAN_UNUSED)
@@ -639,6 +2985,16 @@ static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
639 return 0; 2985 return 0;
640} 2986}
641 2987
2988static int interpolate(int x, int xa, int xb, int ya, int yb)
2989{
2990 int bf, factor, plus;
2991
2992 bf = 2 * (yb - ya) * (x - xa) / (xb - xa);
2993 factor = bf / 2;
2994 plus = bf % 2;
2995 return ya + factor + plus;
2996}
2997
642static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah, 2998static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
643 enum eeprom_param param) 2999 enum eeprom_param param)
644{ 3000{
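
Editor's note on the interpolate() helper added above: it replaces several open-coded linear interpolations later in the patch, and its bf/factor/plus arithmetic rounds the interpolated value to the nearest step instead of truncating toward zero. A minimal standalone illustration with made-up pier values (not driver data):

#include <stdio.h>

static int interpolate(int x, int xa, int xb, int ya, int yb)
{
	int bf = 2 * (yb - ya) * (x - xa) / (xb - xa);

	return ya + bf / 2 + bf % 2;	/* round to nearest, not down */
}

int main(void)
{
	/* halfway between calibration piers at 5180 (value 20) and 5500 (value 25) */
	int rounded   = interpolate(5340, 5180, 5500, 20, 25);
	int truncated = 20 + (5340 - 5180) * (25 - 20) / (5500 - 5180);

	printf("%d %d\n", rounded, truncated);	/* prints: 23 22 */
	return 0;
}
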
@@ -751,6 +3107,36 @@ error:
751 return false; 3107 return false;
752} 3108}
753 3109
3110static bool ar9300_otp_read_word(struct ath_hw *ah, int addr, u32 *data)
3111{
3112 REG_READ(ah, AR9300_OTP_BASE + (4 * addr));
3113
3114 if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS, AR9300_OTP_STATUS_TYPE,
3115 AR9300_OTP_STATUS_VALID, 1000))
3116 return false;
3117
3118 *data = REG_READ(ah, AR9300_OTP_READ_DATA);
3119 return true;
3120}
3121
3122static bool ar9300_read_otp(struct ath_hw *ah, int address, u8 *buffer,
3123 int count)
3124{
3125 u32 data;
3126 int i;
3127
3128 for (i = 0; i < count; i++) {
3129 int offset = 8 * ((address - i) % 4);
3130 if (!ar9300_otp_read_word(ah, (address - i) / 4, &data))
3131 return false;
3132
3133 buffer[i] = (data >> offset) & 0xff;
3134 }
3135
3136 return true;
3137}
3138
3139
754static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference, 3140static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
755 int *length, int *major, int *minor) 3141 int *length, int *major, int *minor)
756{ 3142{
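
Editor's note on ar9300_read_otp() above: the buffer is filled by walking downward from the given byte address (address, address - 1, ...), picking one byte lane out of each 32-bit OTP word; this matches the restore code, which reads down from a base address such as 0x3ff. A standalone sketch of just that indexing arithmetic, with no register access and illustrative names:

#include <stdio.h>

/* Pick byte 'byte_addr' out of a flat array of 32-bit words, the same
 * way the loop above indexes the OTP contents. */
static unsigned char otp_byte(const unsigned int *words, int byte_addr)
{
	int shift = 8 * (byte_addr % 4);	/* byte lane inside the word */

	return (words[byte_addr / 4] >> shift) & 0xff;
}

int main(void)
{
	const unsigned int words[] = { 0x44332211, 0x88776655 };

	/* byte addresses 7, 6, 5 walk down from the top, as the driver does */
	printf("%02x %02x %02x\n",
	       otp_byte(words, 7), otp_byte(words, 6), otp_byte(words, 5));
	/* prints: 88 77 66 */
	return 0;
}
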
@@ -827,6 +3213,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
827{ 3213{
828 struct ath_common *common = ath9k_hw_common(ah); 3214 struct ath_common *common = ath9k_hw_common(ah);
829 u8 *dptr; 3215 u8 *dptr;
3216 const struct ar9300_eeprom *eep = NULL;
830 3217
831 switch (code) { 3218 switch (code) {
832 case _CompressNone: 3219 case _CompressNone:
@@ -844,13 +3231,14 @@ static int ar9300_compress_decision(struct ath_hw *ah,
844 if (reference == 0) { 3231 if (reference == 0) {
845 dptr = mptr; 3232 dptr = mptr;
846 } else { 3233 } else {
847 if (reference != 2) { 3234 eep = ar9003_eeprom_struct_find_by_id(reference);
3235 if (eep == NULL) {
848 ath_print(common, ATH_DBG_EEPROM, 3236 ath_print(common, ATH_DBG_EEPROM,
849			  "can't find reference eeprom " 3237			  "can't find reference eeprom "
850 "struct %d\n", reference); 3238 "struct %d\n", reference);
851 return -1; 3239 return -1;
852 } 3240 }
853 memcpy(mptr, &ar9300_default, mdata_size); 3241 memcpy(mptr, eep, mdata_size);
854 } 3242 }
855 ath_print(common, ATH_DBG_EEPROM, 3243 ath_print(common, ATH_DBG_EEPROM,
856 "restore eeprom %d: block, reference %d," 3244 "restore eeprom %d: block, reference %d,"
@@ -866,6 +3254,38 @@ static int ar9300_compress_decision(struct ath_hw *ah,
866 return 0; 3254 return 0;
867} 3255}
868 3256
3257typedef bool (*eeprom_read_op)(struct ath_hw *ah, int address, u8 *buffer,
3258 int count);
3259
3260static bool ar9300_check_header(void *data)
3261{
3262 u32 *word = data;
3263 return !(*word == 0 || *word == ~0);
3264}
3265
3266static bool ar9300_check_eeprom_header(struct ath_hw *ah, eeprom_read_op read,
3267 int base_addr)
3268{
3269 u8 header[4];
3270
3271 if (!read(ah, base_addr, header, 4))
3272 return false;
3273
3274 return ar9300_check_header(header);
3275}
3276
3277static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
3278 int mdata_size)
3279{
3280 struct ath_common *common = ath9k_hw_common(ah);
3281 u16 *data = (u16 *) mptr;
3282 int i;
3283
3284 for (i = 0; i < mdata_size / 2; i++, data++)
3285 ath9k_hw_nvram_read(common, i, data);
3286
3287 return 0;
3288}
869/* 3289/*
870 * Read the configuration data from the eeprom. 3290 * Read the configuration data from the eeprom.
871 * The data can be put in any specified memory buffer. 3291 * The data can be put in any specified memory buffer.
@@ -886,6 +3306,10 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
886 int it; 3306 int it;
887 u16 checksum, mchecksum; 3307 u16 checksum, mchecksum;
888 struct ath_common *common = ath9k_hw_common(ah); 3308 struct ath_common *common = ath9k_hw_common(ah);
3309 eeprom_read_op read;
3310
3311 if (ath9k_hw_use_flash(ah))
3312 return ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
889 3313
890 word = kzalloc(2048, GFP_KERNEL); 3314 word = kzalloc(2048, GFP_KERNEL);
891 if (!word) 3315 if (!word)
@@ -893,14 +3317,42 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
893 3317
894 memcpy(mptr, &ar9300_default, mdata_size); 3318 memcpy(mptr, &ar9300_default, mdata_size);
895 3319
3320 read = ar9300_read_eeprom;
896 cptr = AR9300_BASE_ADDR; 3321 cptr = AR9300_BASE_ADDR;
3322 ath_print(common, ATH_DBG_EEPROM,
3323		  "Trying EEPROM access at Address 0x%04x\n", cptr);
3324 if (ar9300_check_eeprom_header(ah, read, cptr))
3325 goto found;
3326
3327 cptr = AR9300_BASE_ADDR_512;
3328 ath_print(common, ATH_DBG_EEPROM,
3329		  "Trying EEPROM access at Address 0x%04x\n", cptr);
3330 if (ar9300_check_eeprom_header(ah, read, cptr))
3331 goto found;
3332
3333 read = ar9300_read_otp;
3334 cptr = AR9300_BASE_ADDR;
3335 ath_print(common, ATH_DBG_EEPROM,
3336		  "Trying OTP access at Address 0x%04x\n", cptr);
3337 if (ar9300_check_eeprom_header(ah, read, cptr))
3338 goto found;
3339
3340 cptr = AR9300_BASE_ADDR_512;
3341 ath_print(common, ATH_DBG_EEPROM,
3342		  "Trying OTP access at Address 0x%04x\n", cptr);
3343 if (ar9300_check_eeprom_header(ah, read, cptr))
3344 goto found;
3345
3346 goto fail;
3347
3348found:
3349 ath_print(common, ATH_DBG_EEPROM, "Found valid EEPROM data");
3350
897 for (it = 0; it < MSTATE; it++) { 3351 for (it = 0; it < MSTATE; it++) {
898 if (!ar9300_read_eeprom(ah, cptr, word, COMP_HDR_LEN)) 3352 if (!read(ah, cptr, word, COMP_HDR_LEN))
899 goto fail; 3353 goto fail;
900 3354
901 if ((word[0] == 0 && word[1] == 0 && word[2] == 0 && 3355 if (!ar9300_check_header(word))
902 word[3] == 0) || (word[0] == 0xff && word[1] == 0xff
903 && word[2] == 0xff && word[3] == 0xff))
904 break; 3356 break;
905 3357
906 ar9300_comp_hdr_unpack(word, &code, &reference, 3358 ar9300_comp_hdr_unpack(word, &code, &reference,
@@ -917,8 +3369,7 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
917 } 3369 }
918 3370
919 osize = length; 3371 osize = length;
920 ar9300_read_eeprom(ah, cptr, word, 3372 read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
921 COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
922 checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length); 3373 checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
923 mchecksum = word[COMP_HDR_LEN + osize] | 3374 mchecksum = word[COMP_HDR_LEN + osize] |
924 (word[COMP_HDR_LEN + osize + 1] << 8); 3375 (word[COMP_HDR_LEN + osize + 1] << 8);
@@ -995,9 +3446,9 @@ static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
995static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz) 3446static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
996{ 3447{
997 int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz); 3448 int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
998 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3)); 3449 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
999 REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE, 3450 REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPABIASLVL_MSB, bias >> 2);
1000 ((bias >> 2) & 0x3)); 3451 REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPASHORT2GND, 1);
1001} 3452}
1002 3453
1003static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz) 3454static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
@@ -1100,6 +3551,82 @@ static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
1100 REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg); 3551 REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
1101} 3552}
1102 3553
3554static u16 ar9003_hw_atten_chain_get(struct ath_hw *ah, int chain,
3555 struct ath9k_channel *chan)
3556{
3557 int f[3], t[3];
3558 u16 value;
3559 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
3560
3561 if (chain >= 0 && chain < 3) {
3562 if (IS_CHAN_2GHZ(chan))
3563 return eep->modalHeader2G.xatten1DB[chain];
3564 else if (eep->base_ext2.xatten1DBLow[chain] != 0) {
3565 t[0] = eep->base_ext2.xatten1DBLow[chain];
3566 f[0] = 5180;
3567 t[1] = eep->modalHeader5G.xatten1DB[chain];
3568 f[1] = 5500;
3569 t[2] = eep->base_ext2.xatten1DBHigh[chain];
3570 f[2] = 5785;
3571 value = ar9003_hw_power_interpolate((s32) chan->channel,
3572 f, t, 3);
3573 return value;
3574 } else
3575 return eep->modalHeader5G.xatten1DB[chain];
3576 }
3577
3578 return 0;
3579}
3580
3581
3582static u16 ar9003_hw_atten_chain_get_margin(struct ath_hw *ah, int chain,
3583 struct ath9k_channel *chan)
3584{
3585 int f[3], t[3];
3586 u16 value;
3587 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
3588
3589 if (chain >= 0 && chain < 3) {
3590 if (IS_CHAN_2GHZ(chan))
3591 return eep->modalHeader2G.xatten1Margin[chain];
3592 else if (eep->base_ext2.xatten1MarginLow[chain] != 0) {
3593 t[0] = eep->base_ext2.xatten1MarginLow[chain];
3594 f[0] = 5180;
3595 t[1] = eep->modalHeader5G.xatten1Margin[chain];
3596 f[1] = 5500;
3597 t[2] = eep->base_ext2.xatten1MarginHigh[chain];
3598 f[2] = 5785;
3599 value = ar9003_hw_power_interpolate((s32) chan->channel,
3600 f, t, 3);
3601 return value;
3602 } else
3603 return eep->modalHeader5G.xatten1Margin[chain];
3604 }
3605
3606 return 0;
3607}
3608
3609static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
3610{
3611 int i;
3612 u16 value;
3613 unsigned long ext_atten_reg[3] = {AR_PHY_EXT_ATTEN_CTL_0,
3614 AR_PHY_EXT_ATTEN_CTL_1,
3615 AR_PHY_EXT_ATTEN_CTL_2,
3616 };
3617
3618	/* Test the value: if it is 0, attenuation is unused; don't load anything. */
3619 for (i = 0; i < 3; i++) {
3620 value = ar9003_hw_atten_chain_get(ah, i, chan);
3621 REG_RMW_FIELD(ah, ext_atten_reg[i],
3622 AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
3623
3624 value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
3625 REG_RMW_FIELD(ah, ext_atten_reg[i],
3626 AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, value);
3627 }
3628}
3629
1103static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah) 3630static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
1104{ 3631{
1105 int internal_regulator = 3632 int internal_regulator =
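
Editor's note, to make the new attenuation path above concrete: in the 5 GHz band, when base_ext2 supplies non-zero low/high values, the per-chain xatten1 setting is interpolated between anchors at 5180, 5500 and 5785 MHz. With hypothetical EEPROM values xatten1DBLow = 0x10, modalHeader5G.xatten1DB = 0x19 and xatten1DBHigh = 0x1c, a channel at 5340 MHz is bracketed by the 5180 and 5500 MHz anchors, giving roughly 0x10 + (5340 - 5180) * (0x19 - 0x10) / (5500 - 5180) = 20.5, which interpolate() rounds to 21 (0x15). 2 GHz channels keep using modalHeader2G.xatten1DB directly.
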
@@ -1131,6 +3658,7 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
1131 ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan)); 3658 ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
1132 ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan)); 3659 ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
1133 ar9003_hw_drive_strength_apply(ah); 3660 ar9003_hw_drive_strength_apply(ah);
3661 ar9003_hw_atten_apply(ah, chan);
1134 ar9003_hw_internal_regulator_apply(ah); 3662 ar9003_hw_internal_regulator_apply(ah);
1135} 3663}
1136 3664
@@ -1192,7 +3720,7 @@ static int ar9003_hw_power_interpolate(int32_t x,
1192 if (hx == lx) 3720 if (hx == lx)
1193 y = ly; 3721 y = ly;
1194 else /* interpolate */ 3722 else /* interpolate */
1195 y = ly + (((x - lx) * (hy - ly)) / (hx - lx)); 3723 y = interpolate(x, lx, hx, ly, hy);
1196 } else /* only low is good, use it */ 3724 } else /* only low is good, use it */
1197 y = ly; 3725 y = ly;
1198 } else if (hhave) /* only high is good, use it */ 3726 } else if (hhave) /* only high is good, use it */
@@ -1640,6 +4168,7 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
1640{ 4168{
1641 int tempSlope = 0; 4169 int tempSlope = 0;
1642 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; 4170 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
4171 int f[3], t[3];
1643 4172
1644 REG_RMW(ah, AR_PHY_TPC_11_B0, 4173 REG_RMW(ah, AR_PHY_TPC_11_B0,
1645 (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S), 4174 (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
@@ -1668,7 +4197,16 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
1668 */ 4197 */
1669 if (frequency < 4000) 4198 if (frequency < 4000)
1670 tempSlope = eep->modalHeader2G.tempSlope; 4199 tempSlope = eep->modalHeader2G.tempSlope;
1671 else 4200 else if (eep->base_ext2.tempSlopeLow != 0) {
4201 t[0] = eep->base_ext2.tempSlopeLow;
4202 f[0] = 5180;
4203 t[1] = eep->modalHeader5G.tempSlope;
4204 f[1] = 5500;
4205 t[2] = eep->base_ext2.tempSlopeHigh;
4206 f[2] = 5785;
4207 tempSlope = ar9003_hw_power_interpolate((s32) frequency,
4208 f, t, 3);
4209 } else
1672 tempSlope = eep->modalHeader5G.tempSlope; 4210 tempSlope = eep->modalHeader5G.tempSlope;
1673 4211
1674 REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope); 4212 REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
@@ -1772,25 +4310,23 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
1772 /* so is the high frequency, interpolate */ 4310 /* so is the high frequency, interpolate */
1773 if (hfrequency[ichain] - frequency < 1000) { 4311 if (hfrequency[ichain] - frequency < 1000) {
1774 4312
1775 correction[ichain] = lcorrection[ichain] + 4313 correction[ichain] = interpolate(frequency,
1776 (((frequency - lfrequency[ichain]) * 4314 lfrequency[ichain],
1777 (hcorrection[ichain] - 4315 hfrequency[ichain],
1778 lcorrection[ichain])) / 4316 lcorrection[ichain],
1779 (hfrequency[ichain] - lfrequency[ichain])); 4317 hcorrection[ichain]);
1780 4318
1781 temperature[ichain] = ltemperature[ichain] + 4319 temperature[ichain] = interpolate(frequency,
1782 (((frequency - lfrequency[ichain]) * 4320 lfrequency[ichain],
1783 (htemperature[ichain] - 4321 hfrequency[ichain],
1784 ltemperature[ichain])) / 4322 ltemperature[ichain],
1785 (hfrequency[ichain] - lfrequency[ichain])); 4323 htemperature[ichain]);
1786 4324
1787 voltage[ichain] = 4325 voltage[ichain] = interpolate(frequency,
1788 lvoltage[ichain] + 4326 lfrequency[ichain],
1789 (((frequency - 4327 hfrequency[ichain],
1790 lfrequency[ichain]) * (hvoltage[ichain] - 4328 lvoltage[ichain],
1791 lvoltage[ichain])) 4329 hvoltage[ichain]);
1792 / (hfrequency[ichain] -
1793 lfrequency[ichain]));
1794 } 4330 }
1795 /* only low is good, use it */ 4331 /* only low is good, use it */
1796 else { 4332 else {
@@ -1922,14 +4458,16 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
1922 int i; 4458 int i;
1923 int16_t twiceLargestAntenna; 4459 int16_t twiceLargestAntenna;
1924 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; 4460 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
1925 u16 ctlModesFor11a[] = { 4461 static const u16 ctlModesFor11a[] = {
1926 CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 4462 CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
1927 }; 4463 };
1928 u16 ctlModesFor11g[] = { 4464 static const u16 ctlModesFor11g[] = {
1929 CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, 4465 CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT,
1930 CTL_11G_EXT, CTL_2GHT40 4466 CTL_11G_EXT, CTL_2GHT40
1931 }; 4467 };
1932 u16 numCtlModes, *pCtlMode, ctlMode, freq; 4468 u16 numCtlModes;
4469 const u16 *pCtlMode;
4470 u16 ctlMode, freq;
1933 struct chan_centers centers; 4471 struct chan_centers centers;
1934 u8 *ctlIndex; 4472 u8 *ctlIndex;
1935 u8 ctlNum; 4473 u8 ctlNum;
@@ -2134,8 +4672,9 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
2134 struct ath9k_channel *chan, u16 cfgCtl, 4672 struct ath9k_channel *chan, u16 cfgCtl,
2135 u8 twiceAntennaReduction, 4673 u8 twiceAntennaReduction,
2136 u8 twiceMaxRegulatoryPower, 4674 u8 twiceMaxRegulatoryPower,
2137 u8 powerLimit) 4675 u8 powerLimit, bool test)
2138{ 4676{
4677 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
2139 struct ath_common *common = ath9k_hw_common(ah); 4678 struct ath_common *common = ath9k_hw_common(ah);
2140 u8 targetPowerValT2[ar9300RateSize]; 4679 u8 targetPowerValT2[ar9300RateSize];
2141 unsigned int i = 0; 4680 unsigned int i = 0;
@@ -2147,7 +4686,16 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
2147 twiceMaxRegulatoryPower, 4686 twiceMaxRegulatoryPower,
2148 powerLimit); 4687 powerLimit);
2149 4688
2150 while (i < ar9300RateSize) { 4689 regulatory->max_power_level = 0;
4690 for (i = 0; i < ar9300RateSize; i++) {
4691 if (targetPowerValT2[i] > regulatory->max_power_level)
4692 regulatory->max_power_level = targetPowerValT2[i];
4693 }
4694
4695 if (test)
4696 return;
4697
4698 for (i = 0; i < ar9300RateSize; i++) {
2151 ath_print(common, ATH_DBG_EEPROM, 4699 ath_print(common, ATH_DBG_EEPROM,
2152 "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); 4700 "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
2153 i++; 4701 i++;
@@ -2162,9 +4710,6 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
2162 i++; 4710 i++;
2163 } 4711 }
2164 4712
2165 /* Write target power array to registers */
2166 ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
2167
2168 /* 4713 /*
2169 * This is the TX power we send back to driver core, 4714 * This is the TX power we send back to driver core,
2170 * and it can use to pass to userspace to display our 4715 * and it can use to pass to userspace to display our
@@ -2183,7 +4728,10 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
2183 i = ALL_TARGET_HT20_0_8_16; /* ht20 */ 4728 i = ALL_TARGET_HT20_0_8_16; /* ht20 */
2184 4729
2185 ah->txpower_limit = targetPowerValT2[i]; 4730 ah->txpower_limit = targetPowerValT2[i];
4731 regulatory->max_power_level = targetPowerValT2[i];
2186 4732
4733 /* Write target power array to registers */
4734 ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
2187 ar9003_hw_calibration_apply(ah, chan->channel); 4735 ar9003_hw_calibration_apply(ah, chan->channel);
2188} 4736}
2189 4737
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 655b3033396..9c1463307f0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -79,6 +79,15 @@
79#define FIXED_CCA_THRESHOLD 15 79#define FIXED_CCA_THRESHOLD 15
80 80
81#define AR9300_BASE_ADDR 0x3ff 81#define AR9300_BASE_ADDR 0x3ff
82#define AR9300_BASE_ADDR_512 0x1ff
83
84#define AR9300_OTP_BASE 0x14000
85#define AR9300_OTP_STATUS 0x15f18
86#define AR9300_OTP_STATUS_TYPE 0x7
87#define AR9300_OTP_STATUS_VALID 0x4
88#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
89#define AR9300_OTP_STATUS_SM_BUSY 0x1
90#define AR9300_OTP_READ_DATA 0x15f1c
82 91
83enum targetPowerHTRates { 92enum targetPowerHTRates {
84 HT_TARGET_RATE_0_8_16, 93 HT_TARGET_RATE_0_8_16,
@@ -236,7 +245,7 @@ struct ar9300_modal_eep_header {
236 u8 thresh62; 245 u8 thresh62;
237 __le32 papdRateMaskHt20; 246 __le32 papdRateMaskHt20;
238 __le32 papdRateMaskHt40; 247 __le32 papdRateMaskHt40;
239 u8 futureModal[24]; 248 u8 futureModal[10];
240} __packed; 249} __packed;
241 250
242struct ar9300_cal_data_per_freq_op_loop { 251struct ar9300_cal_data_per_freq_op_loop {
@@ -269,6 +278,20 @@ struct cal_ctl_data_5g {
269 u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G]; 278 u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
270} __packed; 279} __packed;
271 280
281struct ar9300_BaseExtension_1 {
282 u8 ant_div_control;
283 u8 future[13];
284} __packed;
285
286struct ar9300_BaseExtension_2 {
287 int8_t tempSlopeLow;
288 int8_t tempSlopeHigh;
289 u8 xatten1DBLow[AR9300_MAX_CHAINS];
290 u8 xatten1MarginLow[AR9300_MAX_CHAINS];
291 u8 xatten1DBHigh[AR9300_MAX_CHAINS];
292 u8 xatten1MarginHigh[AR9300_MAX_CHAINS];
293} __packed;
294
272struct ar9300_eeprom { 295struct ar9300_eeprom {
273 u8 eepromVersion; 296 u8 eepromVersion;
274 u8 templateVersion; 297 u8 templateVersion;
@@ -278,6 +301,7 @@ struct ar9300_eeprom {
278 struct ar9300_base_eep_hdr baseEepHeader; 301 struct ar9300_base_eep_hdr baseEepHeader;
279 302
280 struct ar9300_modal_eep_header modalHeader2G; 303 struct ar9300_modal_eep_header modalHeader2G;
304 struct ar9300_BaseExtension_1 base_ext1;
281 u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS]; 305 u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS];
282 struct ar9300_cal_data_per_freq_op_loop 306 struct ar9300_cal_data_per_freq_op_loop
283 calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS]; 307 calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS];
@@ -297,6 +321,7 @@ struct ar9300_eeprom {
297 u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G]; 321 u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G];
298 struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G]; 322 struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G];
299 struct ar9300_modal_eep_header modalHeader5G; 323 struct ar9300_modal_eep_header modalHeader5G;
324 struct ar9300_BaseExtension_2 base_ext2;
300 u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS]; 325 u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS];
301 struct ar9300_cal_data_per_freq_op_loop 326 struct ar9300_cal_data_per_freq_op_loop
302 calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS]; 327 calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS];
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 3b424ca1ba8..f5896aa3000 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -237,10 +237,12 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
237 struct ath_tx_status *ts) 237 struct ath_tx_status *ts)
238{ 238{
239 struct ar9003_txs *ads; 239 struct ar9003_txs *ads;
240 u32 status;
240 241
241 ads = &ah->ts_ring[ah->ts_tail]; 242 ads = &ah->ts_ring[ah->ts_tail];
242 243
243 if ((ads->status8 & AR_TxDone) == 0) 244 status = ACCESS_ONCE(ads->status8);
245 if ((status & AR_TxDone) == 0)
244 return -EINPROGRESS; 246 return -EINPROGRESS;
245 247
246 ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size; 248 ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
@@ -253,57 +255,58 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
253 return -EIO; 255 return -EIO;
254 } 256 }
255 257
258 if (status & AR_TxOpExceeded)
259 ts->ts_status |= ATH9K_TXERR_XTXOP;
260 ts->ts_rateindex = MS(status, AR_FinalTxIdx);
261 ts->ts_seqnum = MS(status, AR_SeqNum);
262 ts->tid = MS(status, AR_TxTid);
263
256 ts->qid = MS(ads->ds_info, AR_TxQcuNum); 264 ts->qid = MS(ads->ds_info, AR_TxQcuNum);
257 ts->desc_id = MS(ads->status1, AR_TxDescId); 265 ts->desc_id = MS(ads->status1, AR_TxDescId);
258 ts->ts_seqnum = MS(ads->status8, AR_SeqNum);
259 ts->ts_tstamp = ads->status4; 266 ts->ts_tstamp = ads->status4;
260 ts->ts_status = 0; 267 ts->ts_status = 0;
261 ts->ts_flags = 0; 268 ts->ts_flags = 0;
262 269
263 if (ads->status3 & AR_ExcessiveRetries) 270 status = ACCESS_ONCE(ads->status2);
271 ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
272 ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
273 ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
274 if (status & AR_TxBaStatus) {
275 ts->ts_flags |= ATH9K_TX_BA;
276 ts->ba_low = ads->status5;
277 ts->ba_high = ads->status6;
278 }
279
280 status = ACCESS_ONCE(ads->status3);
281 if (status & AR_ExcessiveRetries)
264 ts->ts_status |= ATH9K_TXERR_XRETRY; 282 ts->ts_status |= ATH9K_TXERR_XRETRY;
265 if (ads->status3 & AR_Filtered) 283 if (status & AR_Filtered)
266 ts->ts_status |= ATH9K_TXERR_FILT; 284 ts->ts_status |= ATH9K_TXERR_FILT;
267 if (ads->status3 & AR_FIFOUnderrun) { 285 if (status & AR_FIFOUnderrun) {
268 ts->ts_status |= ATH9K_TXERR_FIFO; 286 ts->ts_status |= ATH9K_TXERR_FIFO;
269 ath9k_hw_updatetxtriglevel(ah, true); 287 ath9k_hw_updatetxtriglevel(ah, true);
270 } 288 }
271 if (ads->status8 & AR_TxOpExceeded) 289 if (status & AR_TxTimerExpired)
272 ts->ts_status |= ATH9K_TXERR_XTXOP;
273 if (ads->status3 & AR_TxTimerExpired)
274 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED; 290 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
275 291 if (status & AR_DescCfgErr)
276 if (ads->status3 & AR_DescCfgErr)
277 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR; 292 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
278 if (ads->status3 & AR_TxDataUnderrun) { 293 if (status & AR_TxDataUnderrun) {
279 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN; 294 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
280 ath9k_hw_updatetxtriglevel(ah, true); 295 ath9k_hw_updatetxtriglevel(ah, true);
281 } 296 }
282 if (ads->status3 & AR_TxDelimUnderrun) { 297 if (status & AR_TxDelimUnderrun) {
283 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN; 298 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
284 ath9k_hw_updatetxtriglevel(ah, true); 299 ath9k_hw_updatetxtriglevel(ah, true);
285 } 300 }
286 if (ads->status2 & AR_TxBaStatus) { 301 ts->ts_shortretry = MS(status, AR_RTSFailCnt);
287 ts->ts_flags |= ATH9K_TX_BA; 302 ts->ts_longretry = MS(status, AR_DataFailCnt);
288 ts->ba_low = ads->status5; 303 ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
289 ts->ba_high = ads->status6;
290 }
291
292 ts->ts_rateindex = MS(ads->status8, AR_FinalTxIdx);
293
294 ts->ts_rssi = MS(ads->status7, AR_TxRSSICombined);
295 ts->ts_rssi_ctl0 = MS(ads->status2, AR_TxRSSIAnt00);
296 ts->ts_rssi_ctl1 = MS(ads->status2, AR_TxRSSIAnt01);
297 ts->ts_rssi_ctl2 = MS(ads->status2, AR_TxRSSIAnt02);
298 ts->ts_rssi_ext0 = MS(ads->status7, AR_TxRSSIAnt10);
299 ts->ts_rssi_ext1 = MS(ads->status7, AR_TxRSSIAnt11);
300 ts->ts_rssi_ext2 = MS(ads->status7, AR_TxRSSIAnt12);
301 ts->ts_shortretry = MS(ads->status3, AR_RTSFailCnt);
302 ts->ts_longretry = MS(ads->status3, AR_DataFailCnt);
303 ts->ts_virtcol = MS(ads->status3, AR_VirtRetryCnt);
304 ts->ts_antenna = 0;
305 304
306 ts->tid = MS(ads->status8, AR_TxTid); 305 status = ACCESS_ONCE(ads->status7);
306 ts->ts_rssi = MS(status, AR_TxRSSICombined);
307 ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
308 ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
309 ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);
307 310
308 memset(ads, 0, sizeof(*ads)); 311 memset(ads, 0, sizeof(*ads));
309 312
@@ -407,12 +410,36 @@ static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
407static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds, 410static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
408 u32 aggrLen) 411 u32 aggrLen)
409{ 412{
413#define FIRST_DESC_NDELIMS 60
410 struct ar9003_txc *ads = (struct ar9003_txc *) ds; 414 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
411 415
412 ads->ctl12 |= (AR_IsAggr | AR_MoreAggr); 416 ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
413 417
414 ads->ctl17 &= ~AR_AggrLen; 418 if (ah->ent_mode & AR_ENT_OTP_MPSD) {
415 ads->ctl17 |= SM(aggrLen, AR_AggrLen); 419 u32 ctl17, ndelim;
420 /*
421		 * Add delimiters when using RTS/CTS with aggregation
422		 * on a non-enterprise AR9003 card
423 */
424 ctl17 = ads->ctl17;
425 ndelim = MS(ctl17, AR_PadDelim);
426
427 if (ndelim < FIRST_DESC_NDELIMS) {
428 aggrLen += (FIRST_DESC_NDELIMS - ndelim) * 4;
429 ndelim = FIRST_DESC_NDELIMS;
430 }
431
432 ctl17 &= ~AR_AggrLen;
433 ctl17 |= SM(aggrLen, AR_AggrLen);
434
435 ctl17 &= ~AR_PadDelim;
436 ctl17 |= SM(ndelim, AR_PadDelim);
437
438 ads->ctl17 = ctl17;
439 } else {
440 ads->ctl17 &= ~AR_AggrLen;
441 ads->ctl17 |= SM(aggrLen, AR_AggrLen);
442 }
416} 443}
417 444
418static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds, 445static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index 9f2cea70a84..45cc7e80436 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -65,7 +65,7 @@ struct ar9003_rxs {
65 u32 status9; 65 u32 status9;
66 u32 status10; 66 u32 status10;
67 u32 status11; 67 u32 status11;
68} __packed; 68} __packed __aligned(4);
69 69
70/* Transmit Control Descriptor */ 70/* Transmit Control Descriptor */
71struct ar9003_txc { 71struct ar9003_txc {
@@ -93,7 +93,7 @@ struct ar9003_txc {
93 u32 ctl21; /* DMA control 21 */ 93 u32 ctl21; /* DMA control 21 */
94 u32 ctl22; /* DMA control 22 */ 94 u32 ctl22; /* DMA control 22 */
95 u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */ 95 u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */
96} __packed; 96} __packed __aligned(4);
97 97
98struct ar9003_txs { 98struct ar9003_txs {
99 u32 ds_info; 99 u32 ds_info;
@@ -105,7 +105,7 @@ struct ar9003_txs {
105 u32 status6; 105 u32 status6;
106 u32 status7; 106 u32 status7;
107 u32 status8; 107 u32 status8;
108} __packed; 108} __packed __aligned(4);
109 109
110void ar9003_hw_attach_mac_ops(struct ath_hw *hw); 110void ar9003_hw_attach_mac_ops(struct ath_hw *hw);
111void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size); 111void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size);
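
Editor's note on the __aligned(4) additions above: __packed on its own drops the structure's assumed alignment to one byte, which can make the compiler fall back to byte-wise accesses for the 32-bit descriptor words on strict-alignment architectures; pairing it with __aligned(4) keeps the packed layout while telling the compiler that 4-byte loads and stores are safe. A minimal sketch outside the kernel, with the attributes spelled out and u32 defined here only for illustration:

typedef unsigned int u32;

/* same packing + alignment combination as the descriptors above */
struct demo_desc {
	u32 ds_info;
	u32 status1;
} __attribute__((packed, aligned(4)));
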
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 716db414c25..850bc9866c1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -32,12 +32,12 @@ static void ar9003_paprd_setup_single_table(struct ath_hw *ah)
32{ 32{
33 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; 33 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
34 struct ar9300_modal_eep_header *hdr; 34 struct ar9300_modal_eep_header *hdr;
35 const u32 ctrl0[3] = { 35 static const u32 ctrl0[3] = {
36 AR_PHY_PAPRD_CTRL0_B0, 36 AR_PHY_PAPRD_CTRL0_B0,
37 AR_PHY_PAPRD_CTRL0_B1, 37 AR_PHY_PAPRD_CTRL0_B1,
38 AR_PHY_PAPRD_CTRL0_B2 38 AR_PHY_PAPRD_CTRL0_B2
39 }; 39 };
40 const u32 ctrl1[3] = { 40 static const u32 ctrl1[3] = {
41 AR_PHY_PAPRD_CTRL1_B0, 41 AR_PHY_PAPRD_CTRL1_B0,
42 AR_PHY_PAPRD_CTRL1_B1, 42 AR_PHY_PAPRD_CTRL1_B1,
43 AR_PHY_PAPRD_CTRL1_B2 43 AR_PHY_PAPRD_CTRL1_B2
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 669b777729b..656d8ce251a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -128,7 +128,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
128static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah, 128static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
129 struct ath9k_channel *chan) 129 struct ath9k_channel *chan)
130{ 130{
131 u32 spur_freq[4] = { 2420, 2440, 2464, 2480 }; 131 static const u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
132 int cur_bb_spur, negative = 0, cck_spur_freq; 132 int cur_bb_spur, negative = 0, cck_spur_freq;
133 int i; 133 int i;
134 134
@@ -614,7 +614,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
614 channel->max_antenna_gain * 2, 614 channel->max_antenna_gain * 2,
615 channel->max_power * 2, 615 channel->max_power * 2,
616 min((u32) MAX_RATE_POWER, 616 min((u32) MAX_RATE_POWER,
617 (u32) regulatory->power_limit)); 617 (u32) regulatory->power_limit), false);
618 618
619 return 0; 619 return 0;
620} 620}
@@ -1023,25 +1023,25 @@ static void ar9003_hw_do_getnf(struct ath_hw *ah,
1023 int16_t nf; 1023 int16_t nf;
1024 1024
1025 nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR); 1025 nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
1026 nfarray[0] = sign_extend(nf, 9); 1026 nfarray[0] = sign_extend32(nf, 8);
1027 1027
1028 nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR); 1028 nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
1029 nfarray[1] = sign_extend(nf, 9); 1029 nfarray[1] = sign_extend32(nf, 8);
1030 1030
1031 nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR); 1031 nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
1032 nfarray[2] = sign_extend(nf, 9); 1032 nfarray[2] = sign_extend32(nf, 8);
1033 1033
1034 if (!IS_CHAN_HT40(ah->curchan)) 1034 if (!IS_CHAN_HT40(ah->curchan))
1035 return; 1035 return;
1036 1036
1037 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR); 1037 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
1038 nfarray[3] = sign_extend(nf, 9); 1038 nfarray[3] = sign_extend32(nf, 8);
1039 1039
1040 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR); 1040 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
1041 nfarray[4] = sign_extend(nf, 9); 1041 nfarray[4] = sign_extend32(nf, 8);
1042 1042
1043 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR); 1043 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
1044 nfarray[5] = sign_extend(nf, 9); 1044 nfarray[5] = sign_extend32(nf, 8);
1045} 1045}
1046 1046
1047static void ar9003_hw_set_nf_limits(struct ath_hw *ah) 1047static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
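
Editor's note on the argument change that accompanies the switch above: the old driver-local sign_extend() took the field width (9 bits), while the kernel's sign_extend32() takes the index of the sign bit (8 for a 9-bit field). A standalone sketch of the helper's behaviour with simplified types (the real one lives in include/linux/bitops.h):

#include <stdio.h>

/* Bit 'index' is treated as the sign bit of 'value'. */
static int sign_extend32(unsigned int value, int index)
{
	unsigned char shift = 31 - index;

	return (int)(value << shift) >> shift;
}

int main(void)
{
	/* a 9-bit noise-floor field: 0x1ff decodes to -1, not 511 */
	printf("%d %d\n", sign_extend32(0x1ff, 8), sign_extend32(0x0ff, 8));
	/* prints: -1 255 */
	return 0;
}
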
@@ -1113,10 +1113,55 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
1113 aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK; 1113 aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
1114} 1114}
1115 1115
1116static void ar9003_hw_set_radar_params(struct ath_hw *ah,
1117 struct ath_hw_radar_conf *conf)
1118{
1119 u32 radar_0 = 0, radar_1 = 0;
1120
1121 if (!conf) {
1122 REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
1123 return;
1124 }
1125
1126 radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
1127 radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
1128 radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
1129 radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
1130 radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
1131 radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
1132
1133 radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
1134 radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
1135 radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
1136 radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
1137 radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);
1138
1139 REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
1140 REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
1141 if (conf->ext_channel)
1142 REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
1143 else
1144 REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
1145}
1146
1147static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
1148{
1149 struct ath_hw_radar_conf *conf = &ah->radar_conf;
1150
1151 conf->fir_power = -28;
1152 conf->radar_rssi = 0;
1153 conf->pulse_height = 10;
1154 conf->pulse_rssi = 24;
1155 conf->pulse_inband = 8;
1156 conf->pulse_maxlen = 255;
1157 conf->pulse_inband_step = 12;
1158 conf->radar_inband = 8;
1159}
1160
1116void ar9003_hw_attach_phy_ops(struct ath_hw *ah) 1161void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1117{ 1162{
1118 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 1163 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1119 const u32 ar9300_cca_regs[6] = { 1164 static const u32 ar9300_cca_regs[6] = {
1120 AR_PHY_CCA_0, 1165 AR_PHY_CCA_0,
1121 AR_PHY_CCA_1, 1166 AR_PHY_CCA_1,
1122 AR_PHY_CCA_2, 1167 AR_PHY_CCA_2,
@@ -1141,8 +1186,10 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1141 priv_ops->ani_control = ar9003_hw_ani_control; 1186 priv_ops->ani_control = ar9003_hw_ani_control;
1142 priv_ops->do_getnf = ar9003_hw_do_getnf; 1187 priv_ops->do_getnf = ar9003_hw_do_getnf;
1143 priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs; 1188 priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
1189 priv_ops->set_radar_params = ar9003_hw_set_radar_params;
1144 1190
1145 ar9003_hw_set_nf_limits(ah); 1191 ar9003_hw_set_nf_limits(ah);
1192 ar9003_hw_set_radar_conf(ah);
1146 memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs)); 1193 memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
1147} 1194}
1148 1195
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 0d0bec3628e..0b4b4704b1f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -87,33 +87,19 @@ struct ath_config {
87/** 87/**
88 * enum buffer_type - Buffer type flags 88 * enum buffer_type - Buffer type flags
89 * 89 *
90 * @BUF_HT: Send this buffer using HT capabilities
91 * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX) 90 * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
92 * @BUF_AGGR: Indicates whether the buffer can be aggregated 91 * @BUF_AGGR: Indicates whether the buffer can be aggregated
93 * (used in aggregation scheduling) 92 * (used in aggregation scheduling)
94 * @BUF_RETRY: Indicates whether the buffer is retried
95 * @BUF_XRETRY: To denote excessive retries of the buffer 93 * @BUF_XRETRY: To denote excessive retries of the buffer
96 */ 94 */
97enum buffer_type { 95enum buffer_type {
98 BUF_HT = BIT(1),
99 BUF_AMPDU = BIT(2), 96 BUF_AMPDU = BIT(2),
100 BUF_AGGR = BIT(3), 97 BUF_AGGR = BIT(3),
101 BUF_RETRY = BIT(4),
102 BUF_XRETRY = BIT(5), 98 BUF_XRETRY = BIT(5),
103}; 99};
104 100
105#define bf_nframes bf_state.bfs_nframes
106#define bf_al bf_state.bfs_al
107#define bf_frmlen bf_state.bfs_frmlen
108#define bf_retries bf_state.bfs_retries
109#define bf_seqno bf_state.bfs_seqno
110#define bf_tidno bf_state.bfs_tidno
111#define bf_keyix bf_state.bfs_keyix
112#define bf_keytype bf_state.bfs_keytype
113#define bf_isht(bf) (bf->bf_state.bf_type & BUF_HT)
114#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU) 101#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
115#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR) 102#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
116#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
117#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY) 103#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
118 104
119#define ATH_TXSTATUS_RING_SIZE 64 105#define ATH_TXSTATUS_RING_SIZE 64
@@ -178,8 +164,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
178 164
179/* returns delimiter padding required given the packet length */ 165/* returns delimiter padding required given the packet length */
180#define ATH_AGGR_GET_NDELIM(_len) \ 166#define ATH_AGGR_GET_NDELIM(_len) \
181 (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \ 167 (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
182 (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2) 168 DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ))
183 169
184#define BAW_WITHIN(_start, _bawsz, _seqno) \ 170#define BAW_WITHIN(_start, _bawsz, _seqno) \
185 ((((_seqno) - (_start)) & 4095) < (_bawsz)) 171 ((((_seqno) - (_start)) & 4095) < (_bawsz))
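
Editor's note on the ATH_AGGR_GET_NDELIM rewrite above: it is more than a cleanup, since the delimiter count is now rounded up instead of truncated and the macro no longer subtracts one delimiter's worth of length before dividing. A small standalone comparison, assuming ATH_AGGR_MINPLEN is 256 and ATH_AGGR_DELIM_SZ is 4 as defined elsewhere in this header:

#include <stdio.h>

#define ATH_AGGR_DELIM_SZ 4	/* assumed value, from the driver header */
#define ATH_AGGR_MINPLEN  256	/* assumed value, from the driver header */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define NDELIM_OLD(_len) \
	(((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
	  (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)

#define NDELIM_NEW(_len) \
	(((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
	 DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ))

int main(void)
{
	/* a short subframe that needs delimiter padding */
	printf("old=%d new=%d\n", NDELIM_OLD(250), NDELIM_NEW(250));
	/* prints: old=0 new=2 */
	return 0;
}
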
@@ -196,7 +182,6 @@ enum ATH_AGGR_STATUS {
196 182
197#define ATH_TXFIFO_DEPTH 8 183#define ATH_TXFIFO_DEPTH 8
198struct ath_txq { 184struct ath_txq {
199 int axq_class;
200 u32 axq_qnum; 185 u32 axq_qnum;
201 u32 *axq_link; 186 u32 *axq_link;
202 struct list_head axq_q; 187 struct list_head axq_q;
@@ -209,27 +194,28 @@ struct ath_txq {
209 struct list_head txq_fifo_pending; 194 struct list_head txq_fifo_pending;
210 u8 txq_headidx; 195 u8 txq_headidx;
211 u8 txq_tailidx; 196 u8 txq_tailidx;
197 int pending_frames;
212}; 198};
213 199
214struct ath_atx_ac { 200struct ath_atx_ac {
201 struct ath_txq *txq;
215 int sched; 202 int sched;
216 int qnum;
217 struct list_head list; 203 struct list_head list;
218 struct list_head tid_q; 204 struct list_head tid_q;
219}; 205};
220 206
207struct ath_frame_info {
208 int framelen;
209 u32 keyix;
210 enum ath9k_key_type keytype;
211 u8 retries;
212 u16 seqno;
213};
214
221struct ath_buf_state { 215struct ath_buf_state {
222 int bfs_nframes;
223 u16 bfs_al;
224 u16 bfs_frmlen;
225 int bfs_seqno;
226 int bfs_tidno;
227 int bfs_retries;
228 u8 bf_type; 216 u8 bf_type;
229 u8 bfs_paprd; 217 u8 bfs_paprd;
230 unsigned long bfs_paprd_timestamp; 218 enum ath9k_internal_frame_type bfs_ftype;
231 u32 bfs_keyix;
232 enum ath9k_key_type bfs_keytype;
233}; 219};
234 220
235struct ath_buf { 221struct ath_buf {
@@ -242,7 +228,6 @@ struct ath_buf {
242 dma_addr_t bf_daddr; /* physical addr of desc */ 228 dma_addr_t bf_daddr; /* physical addr of desc */
243 dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */ 229 dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
244 bool bf_stale; 230 bool bf_stale;
245 bool bf_tx_aborted;
246 u16 bf_flags; 231 u16 bf_flags;
247 struct ath_buf_state bf_state; 232 struct ath_buf_state bf_state;
248 struct ath_wiphy *aphy; 233 struct ath_wiphy *aphy;
@@ -271,7 +256,6 @@ struct ath_node {
271 struct ath_atx_ac ac[WME_NUM_AC]; 256 struct ath_atx_ac ac[WME_NUM_AC];
272 u16 maxampdu; 257 u16 maxampdu;
273 u8 mpdudensity; 258 u8 mpdudensity;
274 int last_rssi;
275}; 259};
276 260
277#define AGGR_CLEANUP BIT(1) 261#define AGGR_CLEANUP BIT(1)
@@ -280,6 +264,7 @@ struct ath_node {
280 264
281struct ath_tx_control { 265struct ath_tx_control {
282 struct ath_txq *txq; 266 struct ath_txq *txq;
267 struct ath_node *an;
283 int if_id; 268 int if_id;
284 enum ath9k_internal_frame_type frame_type; 269 enum ath9k_internal_frame_type frame_type;
285 u8 paprd; 270 u8 paprd;
@@ -292,12 +277,11 @@ struct ath_tx_control {
292struct ath_tx { 277struct ath_tx {
293 u16 seq_no; 278 u16 seq_no;
294 u32 txqsetup; 279 u32 txqsetup;
295 int hwq_map[WME_NUM_AC];
296 spinlock_t txbuflock; 280 spinlock_t txbuflock;
297 struct list_head txbuf; 281 struct list_head txbuf;
298 struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; 282 struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
299 struct ath_descdma txdma; 283 struct ath_descdma txdma;
300 int pending_frames[WME_NUM_AC]; 284 struct ath_txq *txq_map[WME_NUM_AC];
301}; 285};
302 286
303struct ath_rx_edma { 287struct ath_rx_edma {
@@ -311,7 +295,6 @@ struct ath_rx {
311 u8 rxotherant; 295 u8 rxotherant;
312 u32 *rxlink; 296 u32 *rxlink;
313 unsigned int rxfilter; 297 unsigned int rxfilter;
314 spinlock_t pcu_lock;
315 spinlock_t rxbuflock; 298 spinlock_t rxbuflock;
316 struct list_head rxbuf; 299 struct list_head rxbuf;
317 struct ath_descdma rxdma; 300 struct ath_descdma rxdma;
@@ -328,7 +311,6 @@ void ath_rx_cleanup(struct ath_softc *sc);
328int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp); 311int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
329struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); 312struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
330void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); 313void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
331int ath_tx_setup(struct ath_softc *sc, int haltype);
332void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx); 314void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
333void ath_draintxq(struct ath_softc *sc, 315void ath_draintxq(struct ath_softc *sc,
334 struct ath_txq *txq, bool retry_tx); 316 struct ath_txq *txq, bool retry_tx);
@@ -343,7 +325,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
343 struct ath_tx_control *txctl); 325 struct ath_tx_control *txctl);
344void ath_tx_tasklet(struct ath_softc *sc); 326void ath_tx_tasklet(struct ath_softc *sc);
345void ath_tx_edma_tasklet(struct ath_softc *sc); 327void ath_tx_edma_tasklet(struct ath_softc *sc);
346void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
347int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 328int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
348 u16 tid, u16 *ssn); 329 u16 tid, u16 *ssn);
349void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 330void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
@@ -601,13 +582,14 @@ struct ath_softc {
601 struct ath_hw *sc_ah; 582 struct ath_hw *sc_ah;
602 void __iomem *mem; 583 void __iomem *mem;
603 int irq; 584 int irq;
604 spinlock_t sc_resetlock;
605 spinlock_t sc_serial_rw; 585 spinlock_t sc_serial_rw;
606 spinlock_t sc_pm_lock; 586 spinlock_t sc_pm_lock;
587 spinlock_t sc_pcu_lock;
607 struct mutex mutex; 588 struct mutex mutex;
608 struct work_struct paprd_work; 589 struct work_struct paprd_work;
609 struct work_struct hw_check_work; 590 struct work_struct hw_check_work;
610 struct completion paprd_complete; 591 struct completion paprd_complete;
592 bool paprd_pending;
611 593
612 u32 intrstatus; 594 u32 intrstatus;
613 u32 sc_flags; /* SC_OP_* */ 595 u32 sc_flags; /* SC_OP_* */
@@ -665,11 +647,11 @@ struct ath_wiphy {
665 bool idle; 647 bool idle;
666 int chan_idx; 648 int chan_idx;
667 int chan_is_ht; 649 int chan_is_ht;
650 int last_rssi;
668}; 651};
669 652
670void ath9k_tasklet(unsigned long data); 653void ath9k_tasklet(unsigned long data);
671int ath_reset(struct ath_softc *sc, bool retry_tx); 654int ath_reset(struct ath_softc *sc, bool retry_tx);
672int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
673int ath_cabq_update(struct ath_softc *); 655int ath_cabq_update(struct ath_softc *);
674 656
675static inline void ath_read_cachesize(struct ath_common *common, int *csz) 657static inline void ath_read_cachesize(struct ath_common *common, int *csz)
@@ -718,7 +700,7 @@ void ath9k_ps_restore(struct ath_softc *sc);
718void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 700void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
719int ath9k_wiphy_add(struct ath_softc *sc); 701int ath9k_wiphy_add(struct ath_softc *sc);
720int ath9k_wiphy_del(struct ath_wiphy *aphy); 702int ath9k_wiphy_del(struct ath_wiphy *aphy);
721void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb); 703void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype);
722int ath9k_wiphy_pause(struct ath_wiphy *aphy); 704int ath9k_wiphy_pause(struct ath_wiphy *aphy);
723int ath9k_wiphy_unpause(struct ath_wiphy *aphy); 705int ath9k_wiphy_unpause(struct ath_wiphy *aphy);
724int ath9k_wiphy_select(struct ath_wiphy *aphy); 706int ath9k_wiphy_select(struct ath_wiphy *aphy);
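
The ath9k.h hunks above strip most per-buffer TX bookkeeping (bfs_frmlen, bfs_seqno, bfs_retries, bfs_keyix, bfs_keytype) out of struct ath_buf_state and collect it in a small per-packet struct ath_frame_info, while tx.hwq_map[] gives way to a txq_map[] of queue pointers. The sketch below only illustrates the general idea of keeping such a compact struct in a fixed per-packet scratch area; the PKT_CB_SIZE value and the get_frame_info() helper are invented for the example and are not the driver's actual storage path, which this diff does not show.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the fields added in struct ath_frame_info above. */
struct frame_info {
	int framelen;
	uint32_t keyix;
	uint8_t keytype;	/* stand-in for enum ath9k_key_type */
	uint8_t retries;
	uint16_t seqno;
};

/* Hypothetical 40-byte per-packet scratch area; size chosen for the sketch. */
#define PKT_CB_SIZE 40

struct packet {
	union {
		unsigned char cb[PKT_CB_SIZE];
		struct frame_info fi;
	} scratch;
};

static struct frame_info *get_frame_info(struct packet *pkt)
{
	/* The point of keeping the struct small: it must fit in the
	 * per-packet area instead of living in the DMA buffer state. */
	_Static_assert(sizeof(struct frame_info) <= PKT_CB_SIZE,
		       "frame_info too large for per-packet area");
	return &pkt->scratch.fi;
}

int main(void)
{
	struct packet pkt;

	memset(&pkt, 0, sizeof(pkt));
	get_frame_info(&pkt)->framelen = 1536;
	get_frame_info(&pkt)->retries = 2;

	printf("len=%d retries=%u\n", get_frame_info(&pkt)->framelen,
	       get_frame_info(&pkt)->retries);
	return 0;
}
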
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 19891e7d49a..30724a4e8bb 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -28,7 +28,7 @@ int ath_beaconq_config(struct ath_softc *sc)
28 struct ath_hw *ah = sc->sc_ah; 28 struct ath_hw *ah = sc->sc_ah;
29 struct ath_common *common = ath9k_hw_common(ah); 29 struct ath_common *common = ath9k_hw_common(ah);
30 struct ath9k_tx_queue_info qi, qi_be; 30 struct ath9k_tx_queue_info qi, qi_be;
31 int qnum; 31 struct ath_txq *txq;
32 32
33 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi); 33 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
34 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { 34 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
@@ -38,8 +38,8 @@ int ath_beaconq_config(struct ath_softc *sc)
38 qi.tqi_cwmax = 0; 38 qi.tqi_cwmax = 0;
39 } else { 39 } else {
40 /* Adhoc mode; important thing is to use 2x cwmin. */ 40 /* Adhoc mode; important thing is to use 2x cwmin. */
41 qnum = sc->tx.hwq_map[WME_AC_BE]; 41 txq = sc->tx.txq_map[WME_AC_BE];
42 ath9k_hw_get_txq_props(ah, qnum, &qi_be); 42 ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
43 qi.tqi_aifs = qi_be.tqi_aifs; 43 qi.tqi_aifs = qi_be.tqi_aifs;
44 qi.tqi_cwmin = 4*qi_be.tqi_cwmin; 44 qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
45 qi.tqi_cwmax = qi_be.tqi_cwmax; 45 qi.tqi_cwmax = qi_be.tqi_cwmax;
@@ -109,6 +109,25 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
109 series, 4, 0); 109 series, 4, 0);
110} 110}
111 111
112static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
113{
114 struct ath_wiphy *aphy = hw->priv;
115 struct ath_softc *sc = aphy->sc;
116 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
117 struct ath_tx_control txctl;
118
119 memset(&txctl, 0, sizeof(struct ath_tx_control));
120 txctl.txq = sc->beacon.cabq;
121
122 ath_print(common, ATH_DBG_XMIT,
123 "transmitting CABQ packet, skb: %p\n", skb);
124
125 if (ath_tx_start(hw, skb, &txctl) != 0) {
126 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
127 dev_kfree_skb_any(skb);
128 }
129}
130
112static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, 131static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
113 struct ieee80211_vif *vif) 132 struct ieee80211_vif *vif)
114{ 133{
@@ -503,7 +522,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
503 522
504 /* Set the computed AP beacon timers */ 523 /* Set the computed AP beacon timers */
505 524
506 ath9k_hw_set_interrupts(ah, 0); 525 ath9k_hw_disable_interrupts(ah);
507 ath9k_beacon_init(sc, nexttbtt, intval); 526 ath9k_beacon_init(sc, nexttbtt, intval);
508 sc->beacon.bmisscnt = 0; 527 sc->beacon.bmisscnt = 0;
509 ath9k_hw_set_interrupts(ah, ah->imask); 528 ath9k_hw_set_interrupts(ah, ah->imask);
@@ -638,7 +657,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
638 657
639 /* Set the computed STA beacon timers */ 658 /* Set the computed STA beacon timers */
640 659
641 ath9k_hw_set_interrupts(ah, 0); 660 ath9k_hw_disable_interrupts(ah);
642 ath9k_hw_set_sta_beacon_timers(ah, &bs); 661 ath9k_hw_set_sta_beacon_timers(ah, &bs);
643 ah->imask |= ATH9K_INT_BMISS; 662 ah->imask |= ATH9K_INT_BMISS;
644 ath9k_hw_set_interrupts(ah, ah->imask); 663 ath9k_hw_set_interrupts(ah, ah->imask);
@@ -686,7 +705,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
686 705
687 /* Set the computed ADHOC beacon timers */ 706 /* Set the computed ADHOC beacon timers */
688 707
689 ath9k_hw_set_interrupts(ah, 0); 708 ath9k_hw_disable_interrupts(ah);
690 ath9k_beacon_init(sc, nexttbtt, intval); 709 ath9k_beacon_init(sc, nexttbtt, intval);
691 sc->beacon.bmisscnt = 0; 710 sc->beacon.bmisscnt = 0;
692 ath9k_hw_set_interrupts(ah, ah->imask); 711 ath9k_hw_set_interrupts(ah, ah->imask);
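
Several hunks in this file (and in gpio.c further down) replace ath9k_hw_set_interrupts(ah, 0) with a dedicated ath9k_hw_disable_interrupts() call. The fragment below is only a generic illustration of that pattern, a named wrapper documenting intent instead of a bare zero argument; the struct and function bodies are stand-ins, not the ath9k implementation.

#include <stdint.h>
#include <stdio.h>

struct hw {
	uint32_t imask;		/* interrupt sources the driver wants */
	uint32_t ier;		/* stand-in for the interrupt-enable state */
};

static void hw_set_interrupts(struct hw *ah, uint32_t mask)
{
	ah->ier = mask;		/* a real driver would write a register here */
}

/* Self-describing wrapper: callers that only want everything masked off
 * no longer pass a magic 0, which is easy to misread. */
static void hw_disable_interrupts(struct hw *ah)
{
	hw_set_interrupts(ah, 0);
}

int main(void)
{
	struct hw ah = { .imask = 0x3f, .ier = 0x3f };

	/* Typical critical-section shape seen in the beacon code above:
	 * disable, reprogram the timers, then restore the saved mask. */
	hw_disable_interrupts(&ah);
	/* ... reprogram beacon timers here ... */
	hw_set_interrupts(&ah, ah.imask);

	printf("ier restored to 0x%x\n", ah.ier);
	return 0;
}
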
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index f43a2d98421..48b07c319a7 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -107,12 +107,10 @@ static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
107/* 107/*
108 * Update internal channel flags. 108 * Update internal channel flags.
109 */ 109 */
110void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw, 110void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
111 struct ath9k_channel *ichan) 111 struct ieee80211_channel *chan,
112 enum nl80211_channel_type channel_type)
112{ 113{
113 struct ieee80211_channel *chan = hw->conf.channel;
114 struct ieee80211_conf *conf = &hw->conf;
115
116 ichan->channel = chan->center_freq; 114 ichan->channel = chan->center_freq;
117 ichan->chan = chan; 115 ichan->chan = chan;
118 116
@@ -124,9 +122,8 @@ void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
124 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM; 122 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
125 } 123 }
126 124
127 if (conf_is_ht(conf)) 125 if (channel_type != NL80211_CHAN_NO_HT)
128 ichan->chanmode = ath9k_get_extchanmode(chan, 126 ichan->chanmode = ath9k_get_extchanmode(chan, channel_type);
129 conf->channel_type);
130} 127}
131EXPORT_SYMBOL(ath9k_cmn_update_ichannel); 128EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
132 129
@@ -142,7 +139,7 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
142 139
143 chan_idx = curchan->hw_value; 140 chan_idx = curchan->hw_value;
144 channel = &ah->channels[chan_idx]; 141 channel = &ah->channels[chan_idx];
145 ath9k_cmn_update_ichannel(hw, channel); 142 ath9k_cmn_update_ichannel(channel, curchan, hw->conf.channel_type);
146 143
147 return channel; 144 return channel;
148} 145}
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index fea3b331539..4c04ee85ff0 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -31,10 +31,11 @@
31#define WME_MAX_BA WME_BA_BMP_SIZE 31#define WME_MAX_BA WME_BA_BMP_SIZE
32#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA) 32#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
33 33
34#define WME_AC_BE 0 34/* These must match mac80211 skb queue mapping numbers */
35#define WME_AC_BK 1 35#define WME_AC_VO 0
36#define WME_AC_VI 2 36#define WME_AC_VI 1
37#define WME_AC_VO 3 37#define WME_AC_BE 2
38#define WME_AC_BK 3
38#define WME_NUM_AC 4 39#define WME_NUM_AC 4
39 40
40#define ATH_RSSI_DUMMY_MARKER 0x127 41#define ATH_RSSI_DUMMY_MARKER 0x127
@@ -62,8 +63,9 @@ enum ath_stomp_type {
62 63
63int ath9k_cmn_padpos(__le16 frame_control); 64int ath9k_cmn_padpos(__le16 frame_control);
64int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); 65int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
65void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw, 66void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
66 struct ath9k_channel *ichan); 67 struct ieee80211_channel *chan,
68 enum nl80211_channel_type channel_type);
67struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw, 69struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
68 struct ath_hw *ah); 70 struct ath_hw *ah);
69int ath9k_cmn_count_streams(unsigned int chainmask, int max); 71int ath9k_cmn_count_streams(unsigned int chainmask, int max);
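
The renumbering above (VO=0, VI=1, BE=2, BK=3) lines the WME_AC_* constants up with mac80211's skb queue mapping, so a queue index taken from a packet can be used directly as an access-category index with no translation. A minimal sketch of what that buys, using stand-in names rather than the real mac80211 helpers:

#include <stdio.h>

/* Same ordering as the header above: must match the skb queue mapping. */
enum wme_ac { WME_AC_VO, WME_AC_VI, WME_AC_BE, WME_AC_BK, WME_NUM_AC };

static const char *const ac_names[WME_NUM_AC] = {
	[WME_AC_VO] = "voice",
	[WME_AC_VI] = "video",
	[WME_AC_BE] = "best effort",
	[WME_AC_BK] = "background",
};

/* Stand-in for skb_get_queue_mapping(): the queue index chosen by the
 * stack for a packet.  Because the enum above uses the same order, the
 * value indexes per-AC tables directly. */
static int queue_mapping_stub(int example_queue)
{
	return example_queue;
}

int main(void)
{
	int q;

	for (q = 0; q < WME_NUM_AC; q++)
		printf("queue %d -> %s\n", q, ac_names[queue_mapping_stub(q)]);
	return 0;
}
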
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 43e71a944cb..0c3c74c157f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -461,16 +461,16 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
461 461
462 /* Put variable-length stuff down here, and check for overflows. */ 462 /* Put variable-length stuff down here, and check for overflows. */
463 for (i = 0; i < sc->num_sec_wiphy; i++) { 463 for (i = 0; i < sc->num_sec_wiphy; i++) {
464 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 464 struct ath_wiphy *aphy_tmp = sc->sec_wiphy[i];
465 if (aphy == NULL) 465 if (aphy_tmp == NULL)
466 continue; 466 continue;
467 chan = aphy->hw->conf.channel; 467 chan = aphy_tmp->hw->conf.channel;
468 len += snprintf(buf + len, sizeof(buf) - len, 468 len += snprintf(buf + len, sizeof(buf) - len,
469 "secondary: %s (%s chan=%d ht=%d)\n", 469 "secondary: %s (%s chan=%d ht=%d)\n",
470 wiphy_name(aphy->hw->wiphy), 470 wiphy_name(aphy_tmp->hw->wiphy),
471 ath_wiphy_state_str(aphy->state), 471 ath_wiphy_state_str(aphy_tmp->state),
472 ieee80211_frequency_to_channel(chan->center_freq), 472 ieee80211_frequency_to_channel(chan->center_freq),
473 aphy->chan_is_ht); 473 aphy_tmp->chan_is_ht);
474 } 474 }
475 if (len > sizeof(buf)) 475 if (len > sizeof(buf))
476 len = sizeof(buf); 476 len = sizeof(buf);
@@ -585,10 +585,10 @@ static const struct file_operations fops_wiphy = {
585 do { \ 585 do { \
586 len += snprintf(buf + len, size - len, \ 586 len += snprintf(buf + len, size - len, \
587 "%s%13u%11u%10u%10u\n", str, \ 587 "%s%13u%11u%10u%10u\n", str, \
588 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BE]].elem, \ 588 sc->debug.stats.txstats[WME_AC_BE].elem, \
589 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BK]].elem, \ 589 sc->debug.stats.txstats[WME_AC_BK].elem, \
590 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VI]].elem, \ 590 sc->debug.stats.txstats[WME_AC_VI].elem, \
591 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VO]].elem); \ 591 sc->debug.stats.txstats[WME_AC_VO].elem); \
592} while(0) 592} while(0)
593 593
594static ssize_t read_file_xmit(struct file *file, char __user *user_buf, 594static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
@@ -630,33 +630,35 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
630 return retval; 630 return retval;
631} 631}
632 632
633void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 633void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
634 struct ath_buf *bf, struct ath_tx_status *ts) 634 struct ath_tx_status *ts)
635{ 635{
636 TX_STAT_INC(txq->axq_qnum, tx_pkts_all); 636 int qnum = skb_get_queue_mapping(bf->bf_mpdu);
637 sc->debug.stats.txstats[txq->axq_qnum].tx_bytes_all += bf->bf_mpdu->len; 637
638 TX_STAT_INC(qnum, tx_pkts_all);
639 sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
638 640
639 if (bf_isampdu(bf)) { 641 if (bf_isampdu(bf)) {
640 if (bf_isxretried(bf)) 642 if (bf_isxretried(bf))
641 TX_STAT_INC(txq->axq_qnum, a_xretries); 643 TX_STAT_INC(qnum, a_xretries);
642 else 644 else
643 TX_STAT_INC(txq->axq_qnum, a_completed); 645 TX_STAT_INC(qnum, a_completed);
644 } else { 646 } else {
645 TX_STAT_INC(txq->axq_qnum, completed); 647 TX_STAT_INC(qnum, completed);
646 } 648 }
647 649
648 if (ts->ts_status & ATH9K_TXERR_FIFO) 650 if (ts->ts_status & ATH9K_TXERR_FIFO)
649 TX_STAT_INC(txq->axq_qnum, fifo_underrun); 651 TX_STAT_INC(qnum, fifo_underrun);
650 if (ts->ts_status & ATH9K_TXERR_XTXOP) 652 if (ts->ts_status & ATH9K_TXERR_XTXOP)
651 TX_STAT_INC(txq->axq_qnum, xtxop); 653 TX_STAT_INC(qnum, xtxop);
652 if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED) 654 if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
653 TX_STAT_INC(txq->axq_qnum, timer_exp); 655 TX_STAT_INC(qnum, timer_exp);
654 if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR) 656 if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
655 TX_STAT_INC(txq->axq_qnum, desc_cfg_err); 657 TX_STAT_INC(qnum, desc_cfg_err);
656 if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN) 658 if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
657 TX_STAT_INC(txq->axq_qnum, data_underrun); 659 TX_STAT_INC(qnum, data_underrun);
658 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN) 660 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
659 TX_STAT_INC(txq->axq_qnum, delim_underrun); 661 TX_STAT_INC(qnum, delim_underrun);
660} 662}
661 663
662static const struct file_operations fops_xmit = { 664static const struct file_operations fops_xmit = {
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index bb0823242ba..646ff7e04c8 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -169,8 +169,8 @@ void ath9k_exit_debug(struct ath_hw *ah);
169int ath9k_debug_create_root(void); 169int ath9k_debug_create_root(void);
170void ath9k_debug_remove_root(void); 170void ath9k_debug_remove_root(void);
171void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); 171void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
172void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 172void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
173 struct ath_buf *bf, struct ath_tx_status *ts); 173 struct ath_tx_status *ts);
174void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs); 174void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
175 175
176#else 176#else
@@ -199,7 +199,6 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
199} 199}
200 200
201static inline void ath_debug_stat_tx(struct ath_softc *sc, 201static inline void ath_debug_stat_tx(struct ath_softc *sc,
202 struct ath_txq *txq,
203 struct ath_buf *bf, 202 struct ath_buf *bf,
204 struct ath_tx_status *ts) 203 struct ath_tx_status *ts)
205{ 204{
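
With ath_debug_stat_tx() above now keying its counters off the packet's queue mapping rather than the hardware queue number, the xmit debugfs table indexes txstats[] directly by access category. A rough sketch of that bookkeeping shape follows; the names and fields are hypothetical, since the full txstats layout is only partially visible in this diff.

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_AC };

struct tx_ac_stats {
	unsigned long pkts_all;
	unsigned long bytes_all;
	unsigned long completed;
};

/* One row per access category -- no hwq_map[] indirection needed. */
static struct tx_ac_stats txstats[NUM_AC];

static void stat_tx(int queue_mapping, unsigned int len, int ok)
{
	struct tx_ac_stats *s = &txstats[queue_mapping];

	s->pkts_all++;
	s->bytes_all += len;
	if (ok)
		s->completed++;
}

int main(void)
{
	stat_tx(AC_BE, 1500, 1);
	stat_tx(AC_VO, 200, 1);

	printf("BE: %lu pkts, %lu bytes\n",
	       txstats[AC_BE].pkts_all, txstats[AC_BE].bytes_all);
	return 0;
}
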
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index dd59f09441a..8a644fced5c 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -681,7 +681,8 @@ struct eeprom_ops {
681 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan); 681 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
682 void (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan, 682 void (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
683 u16 cfgCtl, u8 twiceAntennaReduction, 683 u16 cfgCtl, u8 twiceAntennaReduction,
684 u8 twiceMaxRegulatoryPower, u8 powerLimit); 684 u8 twiceMaxRegulatoryPower, u8 powerLimit,
685 bool test);
685 u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz); 686 u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
686}; 687};
687 688
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 4fa4d8e28c6..c2481b3ac7e 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -534,7 +534,9 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
534 u16 twiceMinEdgePower; 534 u16 twiceMinEdgePower;
535 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 535 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
536 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; 536 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
537 u16 numCtlModes, *pCtlMode, ctlMode, freq; 537 u16 numCtlModes;
538 const u16 *pCtlMode;
539 u16 ctlMode, freq;
538 struct chan_centers centers; 540 struct chan_centers centers;
539 struct cal_ctl_data_4k *rep; 541 struct cal_ctl_data_4k *rep;
540 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; 542 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
@@ -550,10 +552,10 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
550 struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = { 552 struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
551 0, {0, 0, 0, 0} 553 0, {0, 0, 0, 0}
552 }; 554 };
553 u16 ctlModesFor11g[] = 555 static const u16 ctlModesFor11g[] = {
554 { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, 556 CTL_11B, CTL_11G, CTL_2GHT20,
555 CTL_2GHT40 557 CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
556 }; 558 };
557 559
558 ath9k_hw_get_channel_centers(ah, chan, &centers); 560 ath9k_hw_get_channel_centers(ah, chan, &centers);
559 561
@@ -726,7 +728,7 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
726 u16 cfgCtl, 728 u16 cfgCtl,
727 u8 twiceAntennaReduction, 729 u8 twiceAntennaReduction,
728 u8 twiceMaxRegulatoryPower, 730 u8 twiceMaxRegulatoryPower,
729 u8 powerLimit) 731 u8 powerLimit, bool test)
730{ 732{
731 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 733 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
732 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; 734 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
@@ -751,15 +753,20 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
751 753
752 ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset); 754 ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset);
753 755
756 regulatory->max_power_level = 0;
754 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { 757 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
755 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); 758 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
756 if (ratesArray[i] > AR5416_MAX_RATE_POWER) 759 if (ratesArray[i] > AR5416_MAX_RATE_POWER)
757 ratesArray[i] = AR5416_MAX_RATE_POWER; 760 ratesArray[i] = AR5416_MAX_RATE_POWER;
761
762 if (ratesArray[i] > regulatory->max_power_level)
763 regulatory->max_power_level = ratesArray[i];
758 } 764 }
759 765
766 if (test)
767 return;
760 768
761 /* Update regulatory */ 769 /* Update regulatory */
762
763 i = rate6mb; 770 i = rate6mb;
764 if (IS_CHAN_HT40(chan)) 771 if (IS_CHAN_HT40(chan))
765 i = rateHt40_0; 772 i = rateHt40_0;
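
The new bool test parameter lets callers have ath9k_hw_4k_set_txpower() (and the other EEPROM variants below) recompute regulatory->max_power_level from the per-rate table and return before any registers are touched. A condensed sketch of that control flow, with made-up types standing in for the EEPROM structures:

#include <stdint.h>
#include <stdio.h>

#define MAX_RATE_POWER 63	/* clamp, analogous to AR5416_MAX_RATE_POWER */

struct regulatory { int16_t max_power_level; };

/* Builds the rate power table, records the maximum for regulatory
 * reporting, and -- when 'test' is set -- returns without "writing"
 * anything, mirroring the early return added in the hunk above. */
static void set_txpower(struct regulatory *reg, int16_t *rates, int n,
			int16_t offset, int test)
{
	int i;

	reg->max_power_level = 0;
	for (i = 0; i < n; i++) {
		rates[i] = (int16_t)(rates[i] + offset);
		if (rates[i] > MAX_RATE_POWER)
			rates[i] = MAX_RATE_POWER;
		if (rates[i] > reg->max_power_level)
			reg->max_power_level = rates[i];
	}

	if (test)
		return;		/* dry run: report only, no hardware writes */

	/* ... program the per-rate power registers here ... */
}

int main(void)
{
	int16_t rates[4] = { 20, 24, 30, 28 };
	struct regulatory reg;

	set_txpower(&reg, rates, 4, 4, 1 /* test */);
	printf("max_power_level = %d\n", reg.max_power_level);
	return 0;
}
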
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 195406db3bd..bcb9ed39c04 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -37,10 +37,10 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
37 int addr, eep_start_loc; 37 int addr, eep_start_loc;
38 eep_data = (u16 *)eep; 38 eep_data = (u16 *)eep;
39 39
40 if (AR9287_HTC_DEVID(ah)) 40 if (!common->driver_info)
41 eep_start_loc = AR9287_HTC_EEP_START_LOC;
42 else
43 eep_start_loc = AR9287_EEP_START_LOC; 41 eep_start_loc = AR9287_EEP_START_LOC;
42 else
43 eep_start_loc = AR9287_HTC_EEP_START_LOC;
44 44
45 if (!ath9k_hw_use_flash(ah)) { 45 if (!ath9k_hw_use_flash(ah)) {
46 ath_print(common, ATH_DBG_EEPROM, 46 ath_print(common, ATH_DBG_EEPROM,
@@ -626,13 +626,13 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
626 struct cal_target_power_ht targetPowerHt20, 626 struct cal_target_power_ht targetPowerHt20,
627 targetPowerHt40 = {0, {0, 0, 0, 0} }; 627 targetPowerHt40 = {0, {0, 0, 0, 0} };
628 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; 628 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
629 u16 ctlModesFor11g[] = {CTL_11B, 629 static const u16 ctlModesFor11g[] = {
630 CTL_11G, 630 CTL_11B, CTL_11G, CTL_2GHT20,
631 CTL_2GHT20, 631 CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
632 CTL_11B_EXT, 632 };
633 CTL_11G_EXT, 633 u16 numCtlModes = 0;
634 CTL_2GHT40}; 634 const u16 *pCtlMode = NULL;
635 u16 numCtlModes = 0, *pCtlMode = NULL, ctlMode, freq; 635 u16 ctlMode, freq;
636 struct chan_centers centers; 636 struct chan_centers centers;
637 int tx_chainmask; 637 int tx_chainmask;
638 u16 twiceMinEdgePower; 638 u16 twiceMinEdgePower;
@@ -853,7 +853,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
853 struct ath9k_channel *chan, u16 cfgCtl, 853 struct ath9k_channel *chan, u16 cfgCtl,
854 u8 twiceAntennaReduction, 854 u8 twiceAntennaReduction,
855 u8 twiceMaxRegulatoryPower, 855 u8 twiceMaxRegulatoryPower,
856 u8 powerLimit) 856 u8 powerLimit, bool test)
857{ 857{
858 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 858 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
859 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; 859 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
@@ -877,12 +877,26 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
877 877
878 ath9k_hw_set_ar9287_power_cal_table(ah, chan, &txPowerIndexOffset); 878 ath9k_hw_set_ar9287_power_cal_table(ah, chan, &txPowerIndexOffset);
879 879
880 regulatory->max_power_level = 0;
880 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { 881 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
881 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); 882 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
882 if (ratesArray[i] > AR9287_MAX_RATE_POWER) 883 if (ratesArray[i] > AR9287_MAX_RATE_POWER)
883 ratesArray[i] = AR9287_MAX_RATE_POWER; 884 ratesArray[i] = AR9287_MAX_RATE_POWER;
885
886 if (ratesArray[i] > regulatory->max_power_level)
887 regulatory->max_power_level = ratesArray[i];
884 } 888 }
885 889
890 if (test)
891 return;
892
893 if (IS_CHAN_2GHZ(chan))
894 i = rate1l;
895 else
896 i = rate6mb;
897
898 regulatory->max_power_level = ratesArray[i];
899
886 if (AR_SREV_9280_20_OR_LATER(ah)) { 900 if (AR_SREV_9280_20_OR_LATER(ah)) {
887 for (i = 0; i < Ar5416RateSize; i++) 901 for (i = 0; i < Ar5416RateSize; i++)
888 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2; 902 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
@@ -971,17 +985,6 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
971 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) 985 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
972 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); 986 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
973 } 987 }
974
975 if (IS_CHAN_2GHZ(chan))
976 i = rate1l;
977 else
978 i = rate6mb;
979
980 if (AR_SREV_9280_20_OR_LATER(ah))
981 regulatory->max_power_level =
982 ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2;
983 else
984 regulatory->max_power_level = ratesArray[i];
985} 988}
986 989
987static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah, 990static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 526d7c933f7..45f70b2404a 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -1022,13 +1022,16 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
1022 0, {0, 0, 0, 0} 1022 0, {0, 0, 0, 0}
1023 }; 1023 };
1024 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; 1024 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
1025 u16 ctlModesFor11a[] = 1025 static const u16 ctlModesFor11a[] = {
1026 { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 }; 1026 CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
1027 u16 ctlModesFor11g[] = 1027 };
1028 { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, 1028 static const u16 ctlModesFor11g[] = {
1029 CTL_2GHT40 1029 CTL_11B, CTL_11G, CTL_2GHT20,
1030 }; 1030 CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
1031 u16 numCtlModes, *pCtlMode, ctlMode, freq; 1031 };
1032 u16 numCtlModes;
1033 const u16 *pCtlMode;
1034 u16 ctlMode, freq;
1032 struct chan_centers centers; 1035 struct chan_centers centers;
1033 int tx_chainmask; 1036 int tx_chainmask;
1034 u16 twiceMinEdgePower; 1037 u16 twiceMinEdgePower;
@@ -1259,7 +1262,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1259 u16 cfgCtl, 1262 u16 cfgCtl,
1260 u8 twiceAntennaReduction, 1263 u8 twiceAntennaReduction,
1261 u8 twiceMaxRegulatoryPower, 1264 u8 twiceMaxRegulatoryPower,
1262 u8 powerLimit) 1265 u8 powerLimit, bool test)
1263{ 1266{
1264#define RT_AR_DELTA(x) (ratesArray[x] - cck_ofdm_delta) 1267#define RT_AR_DELTA(x) (ratesArray[x] - cck_ofdm_delta)
1265 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 1268 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
@@ -1286,12 +1289,44 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1286 1289
1287 ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset); 1290 ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset);
1288 1291
1292 regulatory->max_power_level = 0;
1289 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { 1293 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
1290 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); 1294 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
1291 if (ratesArray[i] > AR5416_MAX_RATE_POWER) 1295 if (ratesArray[i] > AR5416_MAX_RATE_POWER)
1292 ratesArray[i] = AR5416_MAX_RATE_POWER; 1296 ratesArray[i] = AR5416_MAX_RATE_POWER;
1297 if (ratesArray[i] > regulatory->max_power_level)
1298 regulatory->max_power_level = ratesArray[i];
1299 }
1300
1301 if (!test) {
1302 i = rate6mb;
1303
1304 if (IS_CHAN_HT40(chan))
1305 i = rateHt40_0;
1306 else if (IS_CHAN_HT20(chan))
1307 i = rateHt20_0;
1308
1309 regulatory->max_power_level = ratesArray[i];
1310 }
1311
1312 switch(ar5416_get_ntxchains(ah->txchainmask)) {
1313 case 1:
1314 break;
1315 case 2:
1316 regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
1317 break;
1318 case 3:
1319 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
1320 break;
1321 default:
1322 ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM,
1323 "Invalid chainmask configuration\n");
1324 break;
1293 } 1325 }
1294 1326
1327 if (test)
1328 return;
1329
1295 if (AR_SREV_9280_20_OR_LATER(ah)) { 1330 if (AR_SREV_9280_20_OR_LATER(ah)) {
1296 for (i = 0; i < Ar5416RateSize; i++) { 1331 for (i = 0; i < Ar5416RateSize; i++) {
1297 int8_t pwr_table_offset; 1332 int8_t pwr_table_offset;
@@ -1388,34 +1423,6 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1388 REG_WRITE(ah, AR_PHY_POWER_TX_SUB, 1423 REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
1389 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6) 1424 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
1390 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0)); 1425 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
1391
1392 i = rate6mb;
1393
1394 if (IS_CHAN_HT40(chan))
1395 i = rateHt40_0;
1396 else if (IS_CHAN_HT20(chan))
1397 i = rateHt20_0;
1398
1399 if (AR_SREV_9280_20_OR_LATER(ah))
1400 regulatory->max_power_level =
1401 ratesArray[i] + AR5416_PWR_TABLE_OFFSET_DB * 2;
1402 else
1403 regulatory->max_power_level = ratesArray[i];
1404
1405 switch(ar5416_get_ntxchains(ah->txchainmask)) {
1406 case 1:
1407 break;
1408 case 2:
1409 regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
1410 break;
1411 case 3:
1412 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
1413 break;
1414 default:
1415 ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM,
1416 "Invalid chainmask configuration\n");
1417 break;
1418 }
1419} 1426}
1420 1427
1421static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah, 1428static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
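
The relocated block above bumps max_power_level according to how many TX chains the chainmask enables. The sketch below shows only the underlying arithmetic; the GAIN_* increments and the chain counting are placeholders, not the driver's INCREASE_MAXPOW_BY_*_CHAIN values.

#include <stdio.h>

/* Placeholder gains; the real per-chain constants live elsewhere. */
#define GAIN_TWO_CHAIN		6
#define GAIN_THREE_CHAIN	10

/* Count the TX chains enabled in a chainmask (one bit per chain). */
static int ntxchains(unsigned int chainmask)
{
	int n = 0;

	while (chainmask) {
		n += chainmask & 1;
		chainmask >>= 1;
	}
	return n;
}

/* More chains transmitting means more total radiated power, so the
 * reported regulatory maximum is raised accordingly. */
static int adjust_max_power(int max_power_level, unsigned int chainmask)
{
	switch (ntxchains(chainmask)) {
	case 1:
		break;
	case 2:
		max_power_level += GAIN_TWO_CHAIN;
		break;
	case 3:
		max_power_level += GAIN_THREE_CHAIN;
		break;
	default:
		fprintf(stderr, "invalid chainmask configuration\n");
		break;
	}
	return max_power_level;
}

int main(void)
{
	printf("1 chain: %d, 2 chains: %d, 3 chains: %d\n",
	       adjust_max_power(40, 0x1),
	       adjust_max_power(40, 0x3),
	       adjust_max_power(40, 0x7));
	return 0;
}
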
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 4a9a68bba32..6a1a482f9dc 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -259,7 +259,7 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
259 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period); 259 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
260 260
261 if ((ah->imask & ATH9K_INT_GENTIMER) == 0) { 261 if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
262 ath9k_hw_set_interrupts(ah, 0); 262 ath9k_hw_disable_interrupts(ah);
263 ah->imask |= ATH9K_INT_GENTIMER; 263 ah->imask |= ATH9K_INT_GENTIMER;
264 ath9k_hw_set_interrupts(ah, ah->imask); 264 ath9k_hw_set_interrupts(ah, ah->imask);
265 } 265 }
@@ -273,7 +273,7 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
273 273
274 /* if no timer is enabled, turn off interrupt mask */ 274 /* if no timer is enabled, turn off interrupt mask */
275 if (timer_table->timer_mask.val == 0) { 275 if (timer_table->timer_mask.val == 0) {
276 ath9k_hw_set_interrupts(ah, 0); 276 ath9k_hw_disable_interrupts(ah);
277 ah->imask &= ~ATH9K_INT_GENTIMER; 277 ah->imask &= ~ATH9K_INT_GENTIMER;
278 ath9k_hw_set_interrupts(ah, ah->imask); 278 ath9k_hw_set_interrupts(ah, ah->imask);
279 } 279 }
@@ -310,10 +310,8 @@ static void ath_btcoex_period_timer(unsigned long data)
310 310
311 timer_period = is_btscan ? btcoex->btscan_no_stomp : 311 timer_period = is_btscan ? btcoex->btscan_no_stomp :
312 btcoex->btcoex_no_stomp; 312 btcoex->btcoex_no_stomp;
313 ath9k_gen_timer_start(ah, 313 ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, 0,
314 btcoex->no_stomp_timer, 314 timer_period * 10);
315 (ath9k_hw_gettsf32(ah) +
316 timer_period), timer_period * 10);
317 btcoex->hw_timer_enabled = true; 315 btcoex->hw_timer_enabled = true;
318 } 316 }
319 317
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index dfb6560dab9..ae842dbf9b5 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -28,10 +28,16 @@ MODULE_FIRMWARE(FIRMWARE_AR9271);
28static struct usb_device_id ath9k_hif_usb_ids[] = { 28static struct usb_device_id ath9k_hif_usb_ids[] = {
29 { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */ 29 { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
30 { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */ 30 { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
31 { USB_DEVICE(0x0cf3, 0x7010) }, /* Atheros */ 31 { USB_DEVICE(0x0cf3, 0x7010),
32 { USB_DEVICE(0x0cf3, 0x7015) }, /* Atheros */ 32 .driver_info = AR7010_DEVICE },
33 /* Atheros */
34 { USB_DEVICE(0x0cf3, 0x7015),
35 .driver_info = AR7010_DEVICE | AR9287_DEVICE },
36 /* Atheros */
33 { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */ 37 { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
34 { USB_DEVICE(0x0846, 0x9018) }, /* Netgear WNDA3200 */ 38 { USB_DEVICE(0x0846, 0x9018),
39 .driver_info = AR7010_DEVICE },
40 /* Netgear WNDA3200 */
35 { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */ 41 { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
36 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ 42 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
37 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ 43 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
@@ -40,9 +46,13 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
40 { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */ 46 { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
41 { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */ 47 { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
42 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ 48 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
43 { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */ 49 { USB_DEVICE(0x083A, 0xA704),
50 .driver_info = AR7010_DEVICE },
51 /* SMC Networks */
44 { USB_DEVICE(0x040D, 0x3801) }, /* VIA */ 52 { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
45 { USB_DEVICE(0x1668, 0x1200) }, /* Verizon */ 53 { USB_DEVICE(0x1668, 0x1200),
54 .driver_info = AR7010_DEVICE | AR9287_DEVICE },
55 /* Verizon */
46 { }, 56 { },
47}; 57};
48 58
@@ -776,7 +786,8 @@ static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
776 ath9k_hif_usb_dealloc_rx_urbs(hif_dev); 786 ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
777} 787}
778 788
779static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) 789static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev,
790 u32 drv_info)
780{ 791{
781 int transfer, err; 792 int transfer, err;
782 const void *data = hif_dev->firmware->data; 793 const void *data = hif_dev->firmware->data;
@@ -807,18 +818,10 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
807 } 818 }
808 kfree(buf); 819 kfree(buf);
809 820
810 switch (hif_dev->device_id) { 821 if (drv_info & AR7010_DEVICE)
811 case 0x7010:
812 case 0x7015:
813 case 0x9018:
814 case 0xA704:
815 case 0x1200:
816 firm_offset = AR7010_FIRMWARE_TEXT; 822 firm_offset = AR7010_FIRMWARE_TEXT;
817 break; 823 else
818 default:
819 firm_offset = AR9271_FIRMWARE_TEXT; 824 firm_offset = AR9271_FIRMWARE_TEXT;
820 break;
821 }
822 825
823 /* 826 /*
824 * Issue FW download complete command to firmware. 827 * Issue FW download complete command to firmware.
@@ -836,7 +839,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
836 return 0; 839 return 0;
837} 840}
838 841
839static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) 842static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info)
840{ 843{
841 int ret, idx; 844 int ret, idx;
842 struct usb_host_interface *alt = &hif_dev->interface->altsetting[0]; 845 struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
@@ -852,7 +855,7 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
852 } 855 }
853 856
854 /* Download firmware */ 857 /* Download firmware */
855 ret = ath9k_hif_usb_download_fw(hif_dev); 858 ret = ath9k_hif_usb_download_fw(hif_dev, drv_info);
856 if (ret) { 859 if (ret) {
857 dev_err(&hif_dev->udev->dev, 860 dev_err(&hif_dev->udev->dev,
858 "ath9k_htc: Firmware - %s download failed\n", 861 "ath9k_htc: Firmware - %s download failed\n",
@@ -931,23 +934,15 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
931 934
932 /* Find out which firmware to load */ 935 /* Find out which firmware to load */
933 936
934 switch(hif_dev->device_id) { 937 if (id->driver_info & AR7010_DEVICE)
935 case 0x7010:
936 case 0x7015:
937 case 0x9018:
938 case 0xA704:
939 case 0x1200:
940 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202) 938 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
941 hif_dev->fw_name = FIRMWARE_AR7010_1_1; 939 hif_dev->fw_name = FIRMWARE_AR7010_1_1;
942 else 940 else
943 hif_dev->fw_name = FIRMWARE_AR7010; 941 hif_dev->fw_name = FIRMWARE_AR7010;
944 break; 942 else
945 default:
946 hif_dev->fw_name = FIRMWARE_AR9271; 943 hif_dev->fw_name = FIRMWARE_AR9271;
947 break;
948 }
949 944
950 ret = ath9k_hif_usb_dev_init(hif_dev); 945 ret = ath9k_hif_usb_dev_init(hif_dev, id->driver_info);
951 if (ret) { 946 if (ret) {
952 ret = -EINVAL; 947 ret = -EINVAL;
953 goto err_hif_init_usb; 948 goto err_hif_init_usb;
@@ -955,7 +950,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
955 950
956 ret = ath9k_htc_hw_init(hif_dev->htc_handle, 951 ret = ath9k_htc_hw_init(hif_dev->htc_handle,
957 &hif_dev->udev->dev, hif_dev->device_id, 952 &hif_dev->udev->dev, hif_dev->device_id,
958 hif_dev->udev->product); 953 hif_dev->udev->product, id->driver_info);
959 if (ret) { 954 if (ret) {
960 ret = -EINVAL; 955 ret = -EINVAL;
961 goto err_htc_hw_init; 956 goto err_htc_hw_init;
@@ -1033,6 +1028,7 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
1033{ 1028{
1034 struct hif_device_usb *hif_dev = 1029 struct hif_device_usb *hif_dev =
1035 (struct hif_device_usb *) usb_get_intfdata(interface); 1030 (struct hif_device_usb *) usb_get_intfdata(interface);
1031 struct htc_target *htc_handle = hif_dev->htc_handle;
1036 int ret; 1032 int ret;
1037 1033
1038 ret = ath9k_hif_usb_alloc_urbs(hif_dev); 1034 ret = ath9k_hif_usb_alloc_urbs(hif_dev);
@@ -1040,7 +1036,8 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
1040 return ret; 1036 return ret;
1041 1037
1042 if (hif_dev->firmware) { 1038 if (hif_dev->firmware) {
1043 ret = ath9k_hif_usb_download_fw(hif_dev); 1039 ret = ath9k_hif_usb_download_fw(hif_dev,
1040 htc_handle->drv_priv->ah->common.driver_info);
1044 if (ret) 1041 if (ret)
1045 goto fail_resume; 1042 goto fail_resume;
1046 } else { 1043 } else {
@@ -1050,7 +1047,7 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
1050 1047
1051 mdelay(100); 1048 mdelay(100);
1052 1049
1053 ret = ath9k_htc_resume(hif_dev->htc_handle); 1050 ret = ath9k_htc_resume(htc_handle);
1054 1051
1055 if (ret) 1052 if (ret)
1056 goto fail_resume; 1053 goto fail_resume;
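
The hif_usb.c changes above stop switching on raw USB product IDs and instead tag each entry in the ID table with capability flags (AR7010_DEVICE, AR9287_DEVICE) carried in driver_info, which then flows into firmware selection and HTC setup. Below is a minimal stand-alone sketch of that table-driven pattern with invented device IDs, flag values, and firmware names:

#include <stdint.h>
#include <stdio.h>

/* Capability flags, analogous to AR7010_DEVICE / AR9287_DEVICE. */
#define DEV_GEN2	0x0001
#define DEV_ALT_EEPROM	0x0002

struct usb_id {
	uint16_t vendor;
	uint16_t product;
	uint32_t driver_info;	/* flags describing the device family */
};

/* One table entry per device; new hardware only needs a new row with
 * the right flags, not another case in several switch statements. */
static const struct usb_id id_table[] = {
	{ 0x0cf3, 0x1111, 0 },
	{ 0x0cf3, 0x2222, DEV_GEN2 },
	{ 0x0cf3, 0x3333, DEV_GEN2 | DEV_ALT_EEPROM },
};

static const char *pick_firmware(uint32_t drv_info)
{
	return (drv_info & DEV_GEN2) ? "fw-gen2.bin" : "fw-gen1.bin";
}

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(id_table) / sizeof(id_table[0]); i++)
		printf("%04x:%04x -> %s\n", id_table[i].vendor,
		       id_table[i].product,
		       pick_firmware(id_table[i].driver_info));
	return 0;
}
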
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 75ecf6a30d2..afe39a91190 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -368,7 +368,7 @@ struct ath9k_htc_priv {
368 u16 seq_no; 368 u16 seq_no;
369 u32 bmiss_cnt; 369 u32 bmiss_cnt;
370 370
371 struct ath9k_hw_cal_data caldata[38]; 371 struct ath9k_hw_cal_data caldata[ATH9K_NUM_CHANNELS];
372 372
373 spinlock_t beacon_lock; 373 spinlock_t beacon_lock;
374 374
@@ -461,7 +461,7 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv);
461void ath9k_deinit_leds(struct ath9k_htc_priv *priv); 461void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
462 462
463int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, 463int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
464 u16 devid, char *product); 464 u16 devid, char *product, u32 drv_info);
465void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug); 465void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
466#ifdef CONFIG_PM 466#ifdef CONFIG_PM
467int ath9k_htc_resume(struct htc_target *htc_handle); 467int ath9k_htc_resume(struct htc_target *htc_handle);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 7c8a38d0456..071d0c97474 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -181,7 +181,8 @@ static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
181 return htc_connect_service(priv->htc, &req, ep_id); 181 return htc_connect_service(priv->htc, &req, ep_id);
182} 182}
183 183
184static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid) 184static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid,
185 u32 drv_info)
185{ 186{
186 int ret; 187 int ret;
187 188
@@ -245,17 +246,10 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
245 * the HIF layer, shouldn't matter much. 246 * the HIF layer, shouldn't matter much.
246 */ 247 */
247 248
248 switch(devid) { 249 if (drv_info & AR7010_DEVICE)
249 case 0x7010:
250 case 0x7015:
251 case 0x9018:
252 case 0xA704:
253 case 0x1200:
254 priv->htc->credits = 45; 250 priv->htc->credits = 45;
255 break; 251 else
256 default:
257 priv->htc->credits = 33; 252 priv->htc->credits = 33;
258 }
259 253
260 ret = htc_init(priv->htc); 254 ret = htc_init(priv->htc);
261 if (ret) 255 if (ret)
@@ -308,7 +302,7 @@ static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
308 struct ath_hw *ah = (struct ath_hw *) hw_priv; 302 struct ath_hw *ah = (struct ath_hw *) hw_priv;
309 struct ath_common *common = ath9k_hw_common(ah); 303 struct ath_common *common = ath9k_hw_common(ah);
310 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; 304 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
311 __be32 buf[2] = { 305 const __be32 buf[2] = {
312 cpu_to_be32(reg_offset), 306 cpu_to_be32(reg_offset),
313 cpu_to_be32(val), 307 cpu_to_be32(val),
314 }; 308 };
@@ -627,7 +621,8 @@ static void ath9k_init_btcoex(struct ath9k_htc_priv *priv)
627} 621}
628 622
629static int ath9k_init_priv(struct ath9k_htc_priv *priv, 623static int ath9k_init_priv(struct ath9k_htc_priv *priv,
630 u16 devid, char *product) 624 u16 devid, char *product,
625 u32 drv_info)
631{ 626{
632 struct ath_hw *ah = NULL; 627 struct ath_hw *ah = NULL;
633 struct ath_common *common; 628 struct ath_common *common;
@@ -641,6 +636,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
641 636
642 ah->hw_version.devid = devid; 637 ah->hw_version.devid = devid;
643 ah->hw_version.subsysid = 0; /* FIXME */ 638 ah->hw_version.subsysid = 0; /* FIXME */
639 ah->ah_flags |= AH_USE_EEPROM;
644 priv->ah = ah; 640 priv->ah = ah;
645 641
646 common = ath9k_hw_common(ah); 642 common = ath9k_hw_common(ah);
@@ -650,6 +646,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
650 common->hw = priv->hw; 646 common->hw = priv->hw;
651 common->priv = priv; 647 common->priv = priv;
652 common->debug_mask = ath9k_debug; 648 common->debug_mask = ath9k_debug;
649 common->driver_info = drv_info;
653 650
654 spin_lock_init(&priv->wmi->wmi_lock); 651 spin_lock_init(&priv->wmi->wmi_lock);
655 spin_lock_init(&priv->beacon_lock); 652 spin_lock_init(&priv->beacon_lock);
@@ -762,7 +759,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
762} 759}
763 760
764static int ath9k_init_device(struct ath9k_htc_priv *priv, 761static int ath9k_init_device(struct ath9k_htc_priv *priv,
765 u16 devid, char *product) 762 u16 devid, char *product, u32 drv_info)
766{ 763{
767 struct ieee80211_hw *hw = priv->hw; 764 struct ieee80211_hw *hw = priv->hw;
768 struct ath_common *common; 765 struct ath_common *common;
@@ -771,7 +768,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
771 struct ath_regulatory *reg; 768 struct ath_regulatory *reg;
772 769
773 /* Bring up device */ 770 /* Bring up device */
774 error = ath9k_init_priv(priv, devid, product); 771 error = ath9k_init_priv(priv, devid, product, drv_info);
775 if (error != 0) 772 if (error != 0)
776 goto err_init; 773 goto err_init;
777 774
@@ -829,7 +826,7 @@ err_init:
829} 826}
830 827
831int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, 828int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
832 u16 devid, char *product) 829 u16 devid, char *product, u32 drv_info)
833{ 830{
834 struct ieee80211_hw *hw; 831 struct ieee80211_hw *hw;
835 struct ath9k_htc_priv *priv; 832 struct ath9k_htc_priv *priv;
@@ -856,14 +853,14 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
856 goto err_free; 853 goto err_free;
857 } 854 }
858 855
859 ret = ath9k_init_htc_services(priv, devid); 856 ret = ath9k_init_htc_services(priv, devid, drv_info);
860 if (ret) 857 if (ret)
861 goto err_init; 858 goto err_init;
862 859
863 /* The device may have been unplugged earlier. */ 860 /* The device may have been unplugged earlier. */
864 priv->op_flags &= ~OP_UNPLUGGED; 861 priv->op_flags &= ~OP_UNPLUGGED;
865 862
866 ret = ath9k_init_device(priv, devid, product); 863 ret = ath9k_init_device(priv, devid, product, drv_info);
867 if (ret) 864 if (ret)
868 goto err_init; 865 goto err_init;
869 866
@@ -893,14 +890,15 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
893#ifdef CONFIG_PM 890#ifdef CONFIG_PM
894int ath9k_htc_resume(struct htc_target *htc_handle) 891int ath9k_htc_resume(struct htc_target *htc_handle)
895{ 892{
893 struct ath9k_htc_priv *priv = htc_handle->drv_priv;
896 int ret; 894 int ret;
897 895
898 ret = ath9k_htc_wait_for_target(htc_handle->drv_priv); 896 ret = ath9k_htc_wait_for_target(priv);
899 if (ret) 897 if (ret)
900 return ret; 898 return ret;
901 899
902 ret = ath9k_init_htc_services(htc_handle->drv_priv, 900 ret = ath9k_init_htc_services(priv, priv->ah->hw_version.devid,
903 htc_handle->drv_priv->ah->hw_version.devid); 901 priv->ah->common.driver_info);
904 return ret; 902 return ret;
905} 903}
906#endif 904#endif
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a3be8da755..e9761c2c870 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -29,7 +29,7 @@ static void ath_update_txpow(struct ath9k_htc_priv *priv)
29 struct ath_hw *ah = priv->ah; 29 struct ath_hw *ah = priv->ah;
30 30
31 if (priv->curtxpow != priv->txpowlimit) { 31 if (priv->curtxpow != priv->txpowlimit) {
32 ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit); 32 ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit, false);
33 /* read back in case value is clamped */ 33 /* read back in case value is clamped */
34 priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit; 34 priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
35 } 35 }
@@ -184,47 +184,6 @@ err:
184 return ret; 184 return ret;
185} 185}
186 186
187static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
188{
189 struct ath_common *common = ath9k_hw_common(priv->ah);
190 struct ath9k_htc_target_vif hvif;
191 int ret = 0;
192 u8 cmd_rsp;
193
194 if (priv->nvifs > 0)
195 return -ENOBUFS;
196
197 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
198 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
199
200 hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
201 priv->ah->opmode = NL80211_IFTYPE_MONITOR;
202 hvif.index = priv->nvifs;
203
204 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
205 if (ret)
206 return ret;
207
208 priv->nvifs++;
209 return 0;
210}
211
212static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
213{
214 struct ath_common *common = ath9k_hw_common(priv->ah);
215 struct ath9k_htc_target_vif hvif;
216 int ret = 0;
217 u8 cmd_rsp;
218
219 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
220 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
221 hvif.index = 0; /* Should do for now */
222 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
223 priv->nvifs--;
224
225 return ret;
226}
227
228static int ath9k_htc_add_station(struct ath9k_htc_priv *priv, 187static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
229 struct ieee80211_vif *vif, 188 struct ieee80211_vif *vif,
230 struct ieee80211_sta *sta) 189 struct ieee80211_sta *sta)
@@ -1240,16 +1199,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1240 WMI_CMD(WMI_STOP_RECV_CMDID); 1199 WMI_CMD(WMI_STOP_RECV_CMDID);
1241 skb_queue_purge(&priv->tx_queue); 1200 skb_queue_purge(&priv->tx_queue);
1242 1201
1243 /* Remove monitor interface here */
1244 if (ah->opmode == NL80211_IFTYPE_MONITOR) {
1245 if (ath9k_htc_remove_monitor_interface(priv))
1246 ath_print(common, ATH_DBG_FATAL,
1247 "Unable to remove monitor interface\n");
1248 else
1249 ath_print(common, ATH_DBG_CONFIG,
1250 "Monitor interface removed\n");
1251 }
1252
1253 if (ah->btcoex_hw.enabled) { 1202 if (ah->btcoex_hw.enabled) {
1254 ath9k_hw_btcoex_disable(ah); 1203 ath9k_hw_btcoex_disable(ah);
1255 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 1204 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -1400,7 +1349,9 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1400 ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n", 1349 ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
1401 curchan->center_freq); 1350 curchan->center_freq);
1402 1351
1403 ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]); 1352 ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
1353 hw->conf.channel,
1354 hw->conf.channel_type);
1404 1355
1405 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) { 1356 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
1406 ath_print(common, ATH_DBG_FATAL, 1357 ath_print(common, ATH_DBG_FATAL,
@@ -1421,16 +1372,13 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1421 } 1372 }
1422 } 1373 }
1423 1374
1424 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1375 if (changed & IEEE80211_CONF_CHANGE_MONITOR)
1425 if (conf->flags & IEEE80211_CONF_MONITOR) { 1376 if (conf->flags & IEEE80211_CONF_MONITOR) {
1426 if (ath9k_htc_add_monitor_interface(priv)) 1377 ath_print(common, ATH_DBG_CONFIG,
1427 ath_print(common, ATH_DBG_FATAL, 1378 "HW opmode set to Monitor mode\n");
1428 "Failed to set monitor mode\n"); 1379 priv->ah->opmode = NL80211_IFTYPE_MONITOR;
1429 else
1430 ath_print(common, ATH_DBG_CONFIG,
1431 "HW opmode set to Monitor mode\n");
1432 } 1380 }
1433 } 1381
1434 1382
1435 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1383 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1436 mutex_lock(&priv->htc_pm_lock); 1384 mutex_lock(&priv->htc_pm_lock);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 29d80ca7839..77958675b55 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -20,8 +20,15 @@
20/* TX */ 20/* TX */
21/******/ 21/******/
22 22
23static const int subtype_txq_to_hwq[] = {
24 [WME_AC_BE] = ATH_TXQ_AC_BE,
25 [WME_AC_BK] = ATH_TXQ_AC_BK,
26 [WME_AC_VI] = ATH_TXQ_AC_VI,
27 [WME_AC_VO] = ATH_TXQ_AC_VO,
28};
29
23#define ATH9K_HTC_INIT_TXQ(subtype) do { \ 30#define ATH9K_HTC_INIT_TXQ(subtype) do { \
24 qi.tqi_subtype = subtype; \ 31 qi.tqi_subtype = subtype_txq_to_hwq[subtype]; \
25 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; \ 32 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; \
26 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; \ 33 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; \
27 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; \ 34 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; \
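
Because the WME_AC_* values now follow mac80211's queue order (see common.h above) rather than the hardware's subtype numbering, htc_drv_txrx.c adds subtype_txq_to_hwq[] so queue initialization still hands the hardware its own ATH_TXQ_AC_* codes. A small sketch of that translation step, with the same shape but local stand-in enums:

#include <stdio.h>

/* mac80211 queue order (matches the reordered WME_AC_* constants). */
enum { WME_AC_VO, WME_AC_VI, WME_AC_BE, WME_AC_BK, WME_NUM_AC };

/* Hardware queue subtype order (matches enum ath_hw_txq_subtype in hw.h). */
enum { TXQ_AC_BE, TXQ_AC_BK, TXQ_AC_VI, TXQ_AC_VO };

/* Designated initializers make the mapping explicit and order-proof. */
static const int subtype_txq_to_hwq[WME_NUM_AC] = {
	[WME_AC_BE] = TXQ_AC_BE,
	[WME_AC_BK] = TXQ_AC_BK,
	[WME_AC_VI] = TXQ_AC_VI,
	[WME_AC_VO] = TXQ_AC_VO,
};

int main(void)
{
	int ac;

	for (ac = 0; ac < WME_NUM_AC; ac++)
		printf("mac80211 AC %d -> hw subtype %d\n",
		       ac, subtype_txq_to_hwq[ac]);
	return 0;
}
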
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 861ec926930..c41ab8c3016 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -462,9 +462,10 @@ void ath9k_htc_hw_free(struct htc_target *htc)
462} 462}
463 463
464int ath9k_htc_hw_init(struct htc_target *target, 464int ath9k_htc_hw_init(struct htc_target *target,
465 struct device *dev, u16 devid, char *product) 465 struct device *dev, u16 devid,
466 char *product, u32 drv_info)
466{ 467{
467 if (ath9k_htc_probe_device(target, dev, devid, product)) { 468 if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) {
468 printk(KERN_ERR "Failed to initialize the device\n"); 469 printk(KERN_ERR "Failed to initialize the device\n");
469 return -ENODEV; 470 return -ENODEV;
470 } 471 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index 07b6509d589..6fc1b21faa5 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -239,7 +239,8 @@ struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
239 struct device *dev); 239 struct device *dev);
240void ath9k_htc_hw_free(struct htc_target *htc); 240void ath9k_htc_hw_free(struct htc_target *htc);
241int ath9k_htc_hw_init(struct htc_target *target, 241int ath9k_htc_hw_init(struct htc_target *target,
242 struct device *dev, u16 devid, char *product); 242 struct device *dev, u16 devid, char *product,
243 u32 drv_info);
243void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug); 244void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
244 245
245#endif /* HTC_HST_H */ 246#endif /* HTC_HST_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index c7fbe25cc12..380d0c65113 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -310,10 +310,9 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
310 struct ath_common *common = ath9k_hw_common(ah); 310 struct ath_common *common = ath9k_hw_common(ah);
311 u32 regAddr[2] = { AR_STA_ID0 }; 311 u32 regAddr[2] = { AR_STA_ID0 };
312 u32 regHold[2]; 312 u32 regHold[2];
313 u32 patternData[4] = { 0x55555555, 313 static const u32 patternData[4] = {
314 0xaaaaaaaa, 314 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999
315 0x66666666, 315 };
316 0x99999999 };
317 int i, j, loop_max; 316 int i, j, loop_max;
318 317
319 if (!AR_SREV_9300_20_OR_LATER(ah)) { 318 if (!AR_SREV_9300_20_OR_LATER(ah)) {
@@ -419,10 +418,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
419 ah->hw_version.magic = AR5416_MAGIC; 418 ah->hw_version.magic = AR5416_MAGIC;
420 ah->hw_version.subvendorid = 0; 419 ah->hw_version.subvendorid = 0;
421 420
422 ah->ah_flags = 0;
423 if (!AR_SREV_9100(ah))
424 ah->ah_flags = AH_USE_EEPROM;
425
426 ah->atim_window = 0; 421 ah->atim_window = 0;
427 ah->sta_id1_defaults = 422 ah->sta_id1_defaults =
428 AR_STA_ID1_CRPT_MIC_ENABLE | 423 AR_STA_ID1_CRPT_MIC_ENABLE |
@@ -440,7 +435,7 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
440 u32 sum; 435 u32 sum;
441 int i; 436 int i;
442 u16 eeval; 437 u16 eeval;
443 u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW }; 438 static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
444 439
445 sum = 0; 440 sum = 0;
446 for (i = 0; i < 3; i++) { 441 for (i = 0; i < 3; i++) {
@@ -1170,7 +1165,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1170 channel->max_antenna_gain * 2, 1165 channel->max_antenna_gain * 2,
1171 channel->max_power * 2, 1166 channel->max_power * 2,
1172 min((u32) MAX_RATE_POWER, 1167 min((u32) MAX_RATE_POWER,
1173 (u32) regulatory->power_limit)); 1168 (u32) regulatory->power_limit), false);
1174 1169
1175 ath9k_hw_rfbus_done(ah); 1170 ath9k_hw_rfbus_done(ah);
1176 1171
@@ -1833,6 +1828,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1833 1828
1834 ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA; 1829 ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
1835 1830
1831 /* enable key search for every frame in an aggregate */
1832 if (AR_SREV_9300_20_OR_LATER(ah))
1833 ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;
1834
1836 pCap->low_2ghz_chan = 2312; 1835 pCap->low_2ghz_chan = 2312;
1837 pCap->high_2ghz_chan = 2732; 1836 pCap->high_2ghz_chan = 2732;
1838 1837
@@ -1963,6 +1962,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1963 if (AR_SREV_9300_20_OR_LATER(ah)) 1962 if (AR_SREV_9300_20_OR_LATER(ah))
1964 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED; 1963 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
1965 1964
1965 if (AR_SREV_9300_20_OR_LATER(ah))
1966 ah->ent_mode = REG_READ(ah, AR_ENT_OTP);
1967
1966 if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah)) 1968 if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
1967 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20; 1969 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
1968 1970
@@ -2177,7 +2179,7 @@ bool ath9k_hw_disable(struct ath_hw *ah)
2177} 2179}
2178EXPORT_SYMBOL(ath9k_hw_disable); 2180EXPORT_SYMBOL(ath9k_hw_disable);
2179 2181
2180void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit) 2182void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
2181{ 2183{
2182 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 2184 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
2183 struct ath9k_channel *chan = ah->curchan; 2185 struct ath9k_channel *chan = ah->curchan;
@@ -2190,7 +2192,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
2190 channel->max_antenna_gain * 2, 2192 channel->max_antenna_gain * 2,
2191 channel->max_power * 2, 2193 channel->max_power * 2,
2192 min((u32) MAX_RATE_POWER, 2194 min((u32) MAX_RATE_POWER,
2193 (u32) regulatory->power_limit)); 2195 (u32) regulatory->power_limit), test);
2194} 2196}
2195EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit); 2197EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
2196 2198
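ath9k_hw_set_txpowerlimit() grows a third parameter, test. The call sites in this series suggest the split: runtime callers pass false to actually apply the configured limit, while the new init-time probing passes true together with MAX_RATE_POWER and only looks at the clamped value reported back through the regulatory state. Condensed from the call sites in this diff:

    /* Runtime path (ath_update_txpow): apply the configured limit. */
    ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);

    /* Init-time path (ath9k_init_band_txpower): request the maximum and read
     * the clamped result back via ath9k_hw_regulatory(ah)->max_power_level. */
    ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);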
@@ -2324,11 +2326,10 @@ static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
2324 return timer_table->gen_timer_index[b]; 2326 return timer_table->gen_timer_index[b];
2325} 2327}
2326 2328
2327u32 ath9k_hw_gettsf32(struct ath_hw *ah) 2329static u32 ath9k_hw_gettsf32(struct ath_hw *ah)
2328{ 2330{
2329 return REG_READ(ah, AR_TSF_L32); 2331 return REG_READ(ah, AR_TSF_L32);
2330} 2332}
2331EXPORT_SYMBOL(ath9k_hw_gettsf32);
2332 2333
2333struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, 2334struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
2334 void (*trigger)(void *), 2335 void (*trigger)(void *),
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index d47d1b4b600..cc8f3b9af71 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -157,6 +157,13 @@
157#define PAPRD_GAIN_TABLE_ENTRIES 32 157#define PAPRD_GAIN_TABLE_ENTRIES 32
158#define PAPRD_TABLE_SZ 24 158#define PAPRD_TABLE_SZ 24
159 159
160enum ath_hw_txq_subtype {
161 ATH_TXQ_AC_BE = 0,
162 ATH_TXQ_AC_BK = 1,
163 ATH_TXQ_AC_VI = 2,
164 ATH_TXQ_AC_VO = 3,
165};
166
160enum ath_ini_subsys { 167enum ath_ini_subsys {
161 ATH_INI_PRE = 0, 168 ATH_INI_PRE = 0,
162 ATH_INI_CORE, 169 ATH_INI_CORE,
@@ -478,6 +485,40 @@ struct ath_hw_antcomb_conf {
478}; 485};
479 486
480/** 487/**
488 * struct ath_hw_radar_conf - radar detection initialization parameters
489 *
490 * @pulse_inband: threshold for checking the ratio of in-band power
491 * to total power for short radar pulses (half dB steps)
492 * @pulse_inband_step: threshold for checking an in-band power to total
493 * power ratio increase for short radar pulses (half dB steps)
494 * @pulse_height: threshold for detecting the beginning of a short
495 * radar pulse (dB step)
496 * @pulse_rssi: threshold for detecting if a short radar pulse is
497 * gone (dB step)
498 * @pulse_maxlen: maximum pulse length (0.8 us steps)
499 *
500 * @radar_rssi: RSSI threshold for starting long radar detection (dB steps)
501 * @radar_inband: threshold for checking the ratio of in-band power
502 * to total power for long radar pulses (half dB steps)
503 * @fir_power: threshold for detecting the end of a long radar pulse (dB)
504 *
505 * @ext_channel: enable extension channel radar detection
506 */
507struct ath_hw_radar_conf {
508 unsigned int pulse_inband;
509 unsigned int pulse_inband_step;
510 unsigned int pulse_height;
511 unsigned int pulse_rssi;
512 unsigned int pulse_maxlen;
513
514 unsigned int radar_rssi;
515 unsigned int radar_inband;
516 int fir_power;
517
518 bool ext_channel;
519};
520
521/**
481 * struct ath_hw_private_ops - callbacks used internally by hardware code 522 * struct ath_hw_private_ops - callbacks used internally by hardware code
482 * 523 *
483 * This structure contains private callbacks designed to only be used internally 524 * This structure contains private callbacks designed to only be used internally
@@ -542,6 +583,8 @@ struct ath_hw_private_ops {
542 bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd, 583 bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
543 int param); 584 int param);
544 void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]); 585 void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
586 void (*set_radar_params)(struct ath_hw *ah,
587 struct ath_hw_radar_conf *conf);
545 588
546 /* ANI */ 589 /* ANI */
547 void (*ani_cache_ini_regs)(struct ath_hw *ah); 590 void (*ani_cache_ini_regs)(struct ath_hw *ah);
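The new struct ath_hw_radar_conf plus the set_radar_params private op add the plumbing for DFS radar-pulse detection parameters; no caller appears in this excerpt. A purely hypothetical sketch of how a chip-specific init path might wire it up -- the field names and callback signature come from the diff, while the threshold values and the ath9k_hw_private_ops() accessor are assumptions:

    static void example_config_radar(struct ath_hw *ah)
    {
        struct ath_hw_radar_conf *conf = &ah->radar_conf;

        /* Illustrative thresholds only, not taken from the driver. */
        conf->pulse_inband      = 10;   /* half dB steps */
        conf->pulse_inband_step = 14;   /* half dB steps */
        conf->pulse_height      = 20;   /* dB */
        conf->pulse_rssi        = 16;   /* dB */
        conf->pulse_maxlen      = 20;   /* 0.8 us steps */

        conf->radar_rssi   = 12;        /* dB */
        conf->radar_inband = 8;         /* half dB steps */
        conf->fir_power    = -22;       /* dB */

        conf->ext_channel = true;       /* also watch the HT40 extension channel */

        if (ath9k_hw_private_ops(ah)->set_radar_params)
            ath9k_hw_private_ops(ah)->set_radar_params(ah, conf);
    }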
@@ -740,6 +783,8 @@ struct ath_hw {
740 u8 txchainmask; 783 u8 txchainmask;
741 u8 rxchainmask; 784 u8 rxchainmask;
742 785
786 struct ath_hw_radar_conf radar_conf;
787
743 u32 originalGain[22]; 788 u32 originalGain[22];
744 int initPDADC; 789 int initPDADC;
745 int PDADCdelta; 790 int PDADCdelta;
@@ -797,6 +842,9 @@ struct ath_hw {
797 * this register when in sleep states. 842 * this register when in sleep states.
798 */ 843 */
799 u32 WARegVal; 844 u32 WARegVal;
845
846 /* Enterprise mode cap */
847 u32 ent_mode;
800}; 848};
801 849
802static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) 850static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -819,12 +867,6 @@ static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
819 return &ah->ops; 867 return &ah->ops;
820} 868}
821 869
822static inline int sign_extend(int val, const int nbits)
823{
824 int order = BIT(nbits-1);
825 return (val ^ order) - order;
826}
827
828/* Initialization, Detach, Reset */ 870/* Initialization, Detach, Reset */
829const char *ath9k_hw_probe(u16 vendorid, u16 devid); 871const char *ath9k_hw_probe(u16 vendorid, u16 devid);
830void ath9k_hw_deinit(struct ath_hw *ah); 872void ath9k_hw_deinit(struct ath_hw *ah);
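The driver-private sign_extend() inline disappears from hw.h. Its behaviour, interpreting an nbits-wide field as two's complement, matches the generic sign_extend32() helper from <linux/bitops.h>, which is presumably what callers switch to (the call sites are not in this excerpt). A tiny equivalence check:

    #include <linux/types.h>
    #include <linux/bitops.h>

    static int example_noise_floor(void)
    {
        u32 raw = 0x1c4;                /* 9-bit two's-complement field */

        /* Old helper: sign_extend(0x1c4, 9) == -60.
         * Generic helper: the second argument is the sign-bit index (8). */
        return sign_extend32(raw, 8);   /* -60 */
    }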
@@ -861,7 +903,7 @@ u32 ath9k_hw_getrxfilter(struct ath_hw *ah);
861void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits); 903void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits);
862bool ath9k_hw_phy_disable(struct ath_hw *ah); 904bool ath9k_hw_phy_disable(struct ath_hw *ah);
863bool ath9k_hw_disable(struct ath_hw *ah); 905bool ath9k_hw_disable(struct ath_hw *ah);
864void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit); 906void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
865void ath9k_hw_setopmode(struct ath_hw *ah); 907void ath9k_hw_setopmode(struct ath_hw *ah);
866void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1); 908void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
867void ath9k_hw_setbssidmask(struct ath_hw *ah); 909void ath9k_hw_setbssidmask(struct ath_hw *ah);
@@ -893,7 +935,6 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer);
893 935
894void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer); 936void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer);
895void ath_gen_timer_isr(struct ath_hw *hw); 937void ath_gen_timer_isr(struct ath_hw *hw);
896u32 ath9k_hw_gettsf32(struct ath_hw *ah);
897 938
898void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len); 939void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
899 940
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 14b8ab386da..84e19e504dd 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -395,7 +395,8 @@ static void ath9k_init_crypto(struct ath_softc *sc)
395 395
396static int ath9k_init_btcoex(struct ath_softc *sc) 396static int ath9k_init_btcoex(struct ath_softc *sc)
397{ 397{
398 int r, qnum; 398 struct ath_txq *txq;
399 int r;
399 400
400 switch (sc->sc_ah->btcoex_hw.scheme) { 401 switch (sc->sc_ah->btcoex_hw.scheme) {
401 case ATH_BTCOEX_CFG_NONE: 402 case ATH_BTCOEX_CFG_NONE:
@@ -408,8 +409,8 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
408 r = ath_init_btcoex_timer(sc); 409 r = ath_init_btcoex_timer(sc);
409 if (r) 410 if (r)
410 return -1; 411 return -1;
411 qnum = sc->tx.hwq_map[WME_AC_BE]; 412 txq = sc->tx.txq_map[WME_AC_BE];
412 ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum); 413 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
413 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 414 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
414 break; 415 break;
415 default: 416 default:
@@ -422,59 +423,18 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
422 423
423static int ath9k_init_queues(struct ath_softc *sc) 424static int ath9k_init_queues(struct ath_softc *sc)
424{ 425{
425 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
426 int i = 0; 426 int i = 0;
427 427
428 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
429 sc->tx.hwq_map[i] = -1;
430
431 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah); 428 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
432 if (sc->beacon.beaconq == -1) {
433 ath_print(common, ATH_DBG_FATAL,
434 "Unable to setup a beacon xmit queue\n");
435 goto err;
436 }
437
438 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0); 429 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
439 if (sc->beacon.cabq == NULL) {
440 ath_print(common, ATH_DBG_FATAL,
441 "Unable to setup CAB xmit queue\n");
442 goto err;
443 }
444 430
445 sc->config.cabqReadytime = ATH_CABQ_READY_TIME; 431 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
446 ath_cabq_update(sc); 432 ath_cabq_update(sc);
447 433
448 if (!ath_tx_setup(sc, WME_AC_BK)) { 434 for (i = 0; i < WME_NUM_AC; i++)
449 ath_print(common, ATH_DBG_FATAL, 435 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
450 "Unable to setup xmit queue for BK traffic\n");
451 goto err;
452 }
453
454 if (!ath_tx_setup(sc, WME_AC_BE)) {
455 ath_print(common, ATH_DBG_FATAL,
456 "Unable to setup xmit queue for BE traffic\n");
457 goto err;
458 }
459 if (!ath_tx_setup(sc, WME_AC_VI)) {
460 ath_print(common, ATH_DBG_FATAL,
461 "Unable to setup xmit queue for VI traffic\n");
462 goto err;
463 }
464 if (!ath_tx_setup(sc, WME_AC_VO)) {
465 ath_print(common, ATH_DBG_FATAL,
466 "Unable to setup xmit queue for VO traffic\n");
467 goto err;
468 }
469 436
470 return 0; 437 return 0;
471
472err:
473 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
474 if (ATH_TXQ_SETUP(sc, i))
475 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
476
477 return -EIO;
478} 438}
479 439
480static int ath9k_init_channels_rates(struct ath_softc *sc) 440static int ath9k_init_channels_rates(struct ath_softc *sc)
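ath9k_init_queues() stops tracking integer hardware queue numbers in hwq_map[] (the per-AC ath_tx_setup() calls and their error unwinding go with it) and instead stores the struct ath_txq pointers themselves, indexed by WME access category. The core of the new setup, as shown above:

    /* One data queue per WME access category, kept as pointers: */
    for (i = 0; i < WME_NUM_AC; i++)
        sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);

Later hunks (the btcoex setup, ath9k_tx, ath9k_conf_tx, ath_paprd_calibrate) then use sc->tx.txq_map[...] and txq->axq_qnum directly instead of translating queue numbers back and forth.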
@@ -570,6 +530,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
570 ah->hw_version.subsysid = subsysid; 530 ah->hw_version.subsysid = subsysid;
571 sc->sc_ah = ah; 531 sc->sc_ah = ah;
572 532
533 if (!sc->dev->platform_data)
534 ah->ah_flags |= AH_USE_EEPROM;
535
573 common = ath9k_hw_common(ah); 536 common = ath9k_hw_common(ah);
574 common->ops = &ath9k_common_ops; 537 common->ops = &ath9k_common_ops;
575 common->bus_ops = bus_ops; 538 common->bus_ops = bus_ops;
@@ -580,7 +543,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
580 spin_lock_init(&common->cc_lock); 543 spin_lock_init(&common->cc_lock);
581 544
582 spin_lock_init(&sc->wiphy_lock); 545 spin_lock_init(&sc->wiphy_lock);
583 spin_lock_init(&sc->sc_resetlock);
584 spin_lock_init(&sc->sc_serial_rw); 546 spin_lock_init(&sc->sc_serial_rw);
585 spin_lock_init(&sc->sc_pm_lock); 547 spin_lock_init(&sc->sc_pm_lock);
586 mutex_init(&sc->mutex); 548 mutex_init(&sc->mutex);
@@ -642,6 +604,37 @@ err_hw:
642 return ret; 604 return ret;
643} 605}
644 606
607static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
608{
609 struct ieee80211_supported_band *sband;
610 struct ieee80211_channel *chan;
611 struct ath_hw *ah = sc->sc_ah;
612 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
613 int i;
614
615 sband = &sc->sbands[band];
616 for (i = 0; i < sband->n_channels; i++) {
617 chan = &sband->channels[i];
618 ah->curchan = &ah->channels[chan->hw_value];
619 ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
620 ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
621 chan->max_power = reg->max_power_level / 2;
622 }
623}
624
625static void ath9k_init_txpower_limits(struct ath_softc *sc)
626{
627 struct ath_hw *ah = sc->sc_ah;
628 struct ath9k_channel *curchan = ah->curchan;
629
630 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
631 ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
632 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
633 ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
634
635 ah->curchan = curchan;
636}
637
645void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) 638void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
646{ 639{
647 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 640 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
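The new ath9k_init_band_txpower()/ath9k_init_txpower_limits() pass walks every supported channel once at attach time and, using the test flag added earlier, computes each channel's power ceiling so mac80211 sees a realistic max_power before registration; the division by two suggests the regulatory value is kept in half-dBm units. Condensed from the hunks above and from the ath9k_init_device() hunk below:

    /* Per-channel probe (ath9k_init_band_txpower): */
    ah->curchan = &ah->channels[chan->hw_value];
    ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
    ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);    /* probe only      */
    chan->max_power = reg->max_power_level / 2;             /* half dBm -> dBm */

    /* In ath9k_init_device(), before mac80211 registration: */
    ath9k_init_txpower_limits(sc);
    error = ieee80211_register_hw(hw);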
@@ -705,6 +698,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
705 const struct ath_bus_ops *bus_ops) 698 const struct ath_bus_ops *bus_ops)
706{ 699{
707 struct ieee80211_hw *hw = sc->hw; 700 struct ieee80211_hw *hw = sc->hw;
701 struct ath_wiphy *aphy = hw->priv;
708 struct ath_common *common; 702 struct ath_common *common;
709 struct ath_hw *ah; 703 struct ath_hw *ah;
710 int error = 0; 704 int error = 0;
@@ -737,6 +731,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
737 if (error != 0) 731 if (error != 0)
738 goto error_rx; 732 goto error_rx;
739 733
734 ath9k_init_txpower_limits(sc);
735
740 /* Register with mac80211 */ 736 /* Register with mac80211 */
741 error = ieee80211_register_hw(hw); 737 error = ieee80211_register_hw(hw);
742 if (error) 738 if (error)
@@ -754,6 +750,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
754 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work); 750 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
755 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work); 751 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
756 sc->wiphy_scheduler_int = msecs_to_jiffies(500); 752 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
753 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
757 754
758 ath_init_leds(sc); 755 ath_init_leds(sc);
759 ath_start_rfkill_poll(sc); 756 ath_start_rfkill_poll(sc);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 8c13479b17c..b04b37b1124 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -117,12 +117,11 @@ EXPORT_SYMBOL(ath9k_hw_numtxpending);
117bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) 117bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
118{ 118{
119 u32 txcfg, curLevel, newLevel; 119 u32 txcfg, curLevel, newLevel;
120 enum ath9k_int omask;
121 120
122 if (ah->tx_trig_level >= ah->config.max_txtrig_level) 121 if (ah->tx_trig_level >= ah->config.max_txtrig_level)
123 return false; 122 return false;
124 123
125 omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL); 124 ath9k_hw_disable_interrupts(ah);
126 125
127 txcfg = REG_READ(ah, AR_TXCFG); 126 txcfg = REG_READ(ah, AR_TXCFG);
128 curLevel = MS(txcfg, AR_FTRIG); 127 curLevel = MS(txcfg, AR_FTRIG);
@@ -136,7 +135,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
136 REG_WRITE(ah, AR_TXCFG, 135 REG_WRITE(ah, AR_TXCFG,
137 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG)); 136 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
138 137
139 ath9k_hw_set_interrupts(ah, omask); 138 ath9k_hw_enable_interrupts(ah);
140 139
141 ah->tx_trig_level = newLevel; 140 ah->tx_trig_level = newLevel;
142 141
@@ -767,14 +766,6 @@ void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
767} 766}
768EXPORT_SYMBOL(ath9k_hw_startpcureceive); 767EXPORT_SYMBOL(ath9k_hw_startpcureceive);
769 768
770void ath9k_hw_stoppcurecv(struct ath_hw *ah)
771{
772 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
773
774 ath9k_hw_disable_mib_counters(ah);
775}
776EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
777
778void ath9k_hw_abortpcurecv(struct ath_hw *ah) 769void ath9k_hw_abortpcurecv(struct ath_hw *ah)
779{ 770{
780 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS); 771 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
@@ -849,28 +840,59 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
849} 840}
850EXPORT_SYMBOL(ath9k_hw_intrpend); 841EXPORT_SYMBOL(ath9k_hw_intrpend);
851 842
852enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, 843void ath9k_hw_disable_interrupts(struct ath_hw *ah)
853 enum ath9k_int ints) 844{
845 struct ath_common *common = ath9k_hw_common(ah);
846
847 ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
848 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
849 (void) REG_READ(ah, AR_IER);
850 if (!AR_SREV_9100(ah)) {
851 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
852 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
853
854 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
855 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
856 }
857}
858EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
859
860void ath9k_hw_enable_interrupts(struct ath_hw *ah)
861{
862 struct ath_common *common = ath9k_hw_common(ah);
863
864 if (!(ah->imask & ATH9K_INT_GLOBAL))
865 return;
866
867 ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
868 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
869 if (!AR_SREV_9100(ah)) {
870 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
871 AR_INTR_MAC_IRQ);
872 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
873
874
875 REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
876 AR_INTR_SYNC_DEFAULT);
877 REG_WRITE(ah, AR_INTR_SYNC_MASK,
878 AR_INTR_SYNC_DEFAULT);
879 }
880 ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
881 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
882}
883EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
884
885void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
854{ 886{
855 enum ath9k_int omask = ah->imask; 887 enum ath9k_int omask = ah->imask;
856 u32 mask, mask2; 888 u32 mask, mask2;
857 struct ath9k_hw_capabilities *pCap = &ah->caps; 889 struct ath9k_hw_capabilities *pCap = &ah->caps;
858 struct ath_common *common = ath9k_hw_common(ah); 890 struct ath_common *common = ath9k_hw_common(ah);
859 891
860 ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 892 if (!(ints & ATH9K_INT_GLOBAL))
861 893 ath9k_hw_enable_interrupts(ah);
862 if (omask & ATH9K_INT_GLOBAL) {
863 ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
864 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
865 (void) REG_READ(ah, AR_IER);
866 if (!AR_SREV_9100(ah)) {
867 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
868 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
869 894
870 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); 895 ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
871 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
872 }
873 }
874 896
875 /* TODO: global int Ref count */ 897 /* TODO: global int Ref count */
876 mask = ints & ATH9K_INT_COMMON; 898 mask = ints & ATH9K_INT_COMMON;
@@ -946,24 +968,8 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
946 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 968 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
947 } 969 }
948 970
949 if (ints & ATH9K_INT_GLOBAL) { 971 ath9k_hw_enable_interrupts(ah);
950 ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
951 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
952 if (!AR_SREV_9100(ah)) {
953 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
954 AR_INTR_MAC_IRQ);
955 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
956
957
958 REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
959 AR_INTR_SYNC_DEFAULT);
960 REG_WRITE(ah, AR_INTR_SYNC_MASK,
961 AR_INTR_SYNC_DEFAULT);
962 }
963 ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
964 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
965 }
966 972
967 return omask; 973 return;
968} 974}
969EXPORT_SYMBOL(ath9k_hw_set_interrupts); 975EXPORT_SYMBOL(ath9k_hw_set_interrupts);
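ath9k_hw_set_interrupts() no longer returns the previous mask; the save/restore dance is replaced by explicit ath9k_hw_disable_interrupts()/ath9k_hw_enable_interrupts() helpers, and set_interrupts() itself finishes by re-enabling. The bracketing pattern the converted callers use, condensed from this diff:

    ath9k_hw_disable_interrupts(ah);

    /* ... drain tx queues, stop rx, reset or reprogram the hardware ... */

    ath9k_hw_set_interrupts(ah, ah->imask);   /* reprogram mask + re-enable */

    /* Callers that leave the mask untouched (the tasklet exit path, the MIB
     * handler in ath_isr, ath9k_hw_updatetxtriglevel) call
     * ath9k_hw_enable_interrupts(ah) on its own instead. */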
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 7c1a34d64f6..7512f97e8f4 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -104,13 +104,11 @@ struct ath_tx_status {
104 u32 ts_tstamp; 104 u32 ts_tstamp;
105 u16 ts_seqnum; 105 u16 ts_seqnum;
106 u8 ts_status; 106 u8 ts_status;
107 u8 ts_ratecode;
108 u8 ts_rateindex; 107 u8 ts_rateindex;
109 int8_t ts_rssi; 108 int8_t ts_rssi;
110 u8 ts_shortretry; 109 u8 ts_shortretry;
111 u8 ts_longretry; 110 u8 ts_longretry;
112 u8 ts_virtcol; 111 u8 ts_virtcol;
113 u8 ts_antenna;
114 u8 ts_flags; 112 u8 ts_flags;
115 int8_t ts_rssi_ctl0; 113 int8_t ts_rssi_ctl0;
116 int8_t ts_rssi_ctl1; 114 int8_t ts_rssi_ctl1;
@@ -121,7 +119,6 @@ struct ath_tx_status {
121 u8 qid; 119 u8 qid;
122 u16 desc_id; 120 u16 desc_id;
123 u8 tid; 121 u8 tid;
124 u8 pad[2];
125 u32 ba_low; 122 u32 ba_low;
126 u32 ba_high; 123 u32 ba_high;
127 u32 evm0; 124 u32 evm0;
@@ -240,7 +237,7 @@ struct ath_desc {
240 u32 ds_ctl1; 237 u32 ds_ctl1;
241 u32 ds_hw[20]; 238 u32 ds_hw[20];
242 void *ds_vdata; 239 void *ds_vdata;
243} __packed; 240} __packed __aligned(4);
244 241
245#define ATH9K_TXDESC_CLRDMASK 0x0001 242#define ATH9K_TXDESC_CLRDMASK 0x0001
246#define ATH9K_TXDESC_NOACK 0x0002 243#define ATH9K_TXDESC_NOACK 0x0002
@@ -310,7 +307,7 @@ struct ar5416_desc {
310 u32 status8; 307 u32 status8;
311 } rx; 308 } rx;
312 } u; 309 } u;
313} __packed; 310} __packed __aligned(4);
314 311
315#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds)) 312#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds))
316#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds)) 313#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds))
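Marking the descriptor structures __packed __aligned(4) keeps the no-padding layout but tells the compiler that instances are still word aligned, so 32-bit members can again be accessed with single loads/stores on architectures where a bare __packed would force byte-at-a-time access. A stand-alone illustration of the attribute pair (not driver code):

    #include <stdint.h>

    /* No padding between members, yet the struct (and therefore every
     * uint32_t member sitting at a 4-byte offset) is guaranteed 4-byte
     * aligned, so ordinary 32-bit accesses remain legal. */
    struct dma_desc {
        uint32_t link;
        uint32_t data;
        uint32_t ctl0;
        uint32_t ctl1;
    } __attribute__((packed, aligned(4)));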
@@ -669,6 +666,7 @@ enum ath9k_key_type {
669 666
670struct ath_hw; 667struct ath_hw;
671struct ath9k_channel; 668struct ath9k_channel;
669enum ath9k_int;
672 670
673u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q); 671u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
674void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp); 672void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
@@ -693,15 +691,15 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
693bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set); 691bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
694void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); 692void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
695void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning); 693void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
696void ath9k_hw_stoppcurecv(struct ath_hw *ah);
697void ath9k_hw_abortpcurecv(struct ath_hw *ah); 694void ath9k_hw_abortpcurecv(struct ath_hw *ah);
698bool ath9k_hw_stopdmarecv(struct ath_hw *ah); 695bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
699int ath9k_hw_beaconq_setup(struct ath_hw *ah); 696int ath9k_hw_beaconq_setup(struct ath_hw *ah);
700 697
701/* Interrupt Handling */ 698/* Interrupt Handling */
702bool ath9k_hw_intrpend(struct ath_hw *ah); 699bool ath9k_hw_intrpend(struct ath_hw *ah);
703enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, 700void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
704 enum ath9k_int ints); 701void ath9k_hw_enable_interrupts(struct ath_hw *ah);
702void ath9k_hw_disable_interrupts(struct ath_hw *ah);
705 703
706void ar9002_hw_attach_mac_ops(struct ath_hw *ah); 704void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
707 705
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dace215b693..7acd6b0ca01 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -23,7 +23,7 @@ static void ath_update_txpow(struct ath_softc *sc)
23 struct ath_hw *ah = sc->sc_ah; 23 struct ath_hw *ah = sc->sc_ah;
24 24
25 if (sc->curtxpow != sc->config.txpowlimit) { 25 if (sc->curtxpow != sc->config.txpowlimit) {
26 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit); 26 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
27 /* read back in case value is clamped */ 27 /* read back in case value is clamped */
28 sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit; 28 sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
29 } 29 }
@@ -234,6 +234,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
234 234
235 ath9k_ps_wakeup(sc); 235 ath9k_ps_wakeup(sc);
236 236
237 spin_lock_bh(&sc->sc_pcu_lock);
238
237 /* 239 /*
238 * This is only performed if the channel settings have 240 * This is only performed if the channel settings have
239 * actually changed. 241 * actually changed.
@@ -243,11 +245,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
243 * hardware at the new frequency, and then re-enable 245 * hardware at the new frequency, and then re-enable
244 * the relevant bits of the h/w. 246 * the relevant bits of the h/w.
245 */ 247 */
246 ath9k_hw_set_interrupts(ah, 0); 248 ath9k_hw_disable_interrupts(ah);
247 ath_drain_all_txq(sc, false); 249 ath_drain_all_txq(sc, false);
248 250
249 spin_lock_bh(&sc->rx.pcu_lock);
250
251 stopped = ath_stoprecv(sc); 251 stopped = ath_stoprecv(sc);
252 252
253 /* XXX: do not flush receive queue here. We don't want 253 /* XXX: do not flush receive queue here. We don't want
@@ -266,30 +266,22 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
266 channel->center_freq, conf_is_ht40(conf), 266 channel->center_freq, conf_is_ht40(conf),
267 fastcc); 267 fastcc);
268 268
269 spin_lock_bh(&sc->sc_resetlock);
270
271 r = ath9k_hw_reset(ah, hchan, caldata, fastcc); 269 r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
272 if (r) { 270 if (r) {
273 ath_print(common, ATH_DBG_FATAL, 271 ath_print(common, ATH_DBG_FATAL,
274 "Unable to reset channel (%u MHz), " 272 "Unable to reset channel (%u MHz), "
275 "reset status %d\n", 273 "reset status %d\n",
276 channel->center_freq, r); 274 channel->center_freq, r);
277 spin_unlock_bh(&sc->sc_resetlock);
278 spin_unlock_bh(&sc->rx.pcu_lock);
279 goto ps_restore; 275 goto ps_restore;
280 } 276 }
281 spin_unlock_bh(&sc->sc_resetlock);
282 277
283 if (ath_startrecv(sc) != 0) { 278 if (ath_startrecv(sc) != 0) {
284 ath_print(common, ATH_DBG_FATAL, 279 ath_print(common, ATH_DBG_FATAL,
285 "Unable to restart recv logic\n"); 280 "Unable to restart recv logic\n");
286 r = -EIO; 281 r = -EIO;
287 spin_unlock_bh(&sc->rx.pcu_lock);
288 goto ps_restore; 282 goto ps_restore;
289 } 283 }
290 284
291 spin_unlock_bh(&sc->rx.pcu_lock);
292
293 ath_update_txpow(sc); 285 ath_update_txpow(sc);
294 ath9k_hw_set_interrupts(ah, ah->imask); 286 ath9k_hw_set_interrupts(ah, ah->imask);
295 287
@@ -300,6 +292,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
300 } 292 }
301 293
302 ps_restore: 294 ps_restore:
295 spin_unlock_bh(&sc->sc_pcu_lock);
296
303 ath9k_ps_restore(sc); 297 ath9k_ps_restore(sc);
304 return r; 298 return r;
305} 299}
@@ -340,7 +334,7 @@ void ath_paprd_calibrate(struct work_struct *work)
340 struct ath_tx_control txctl; 334 struct ath_tx_control txctl;
341 struct ath9k_hw_cal_data *caldata = ah->caldata; 335 struct ath9k_hw_cal_data *caldata = ah->caldata;
342 struct ath_common *common = ath9k_hw_common(ah); 336 struct ath_common *common = ath9k_hw_common(ah);
343 int qnum, ftype; 337 int ftype;
344 int chain_ok = 0; 338 int chain_ok = 0;
345 int chain; 339 int chain;
346 int len = 1800; 340 int len = 1800;
@@ -367,8 +361,7 @@ void ath_paprd_calibrate(struct work_struct *work)
367 memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN); 361 memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
368 362
369 memset(&txctl, 0, sizeof(txctl)); 363 memset(&txctl, 0, sizeof(txctl));
370 qnum = sc->tx.hwq_map[WME_AC_BE]; 364 txctl.txq = sc->tx.txq_map[WME_AC_BE];
371 txctl.txq = &sc->tx.txq[qnum];
372 365
373 ath9k_ps_wakeup(sc); 366 ath9k_ps_wakeup(sc);
374 ar9003_paprd_init_table(ah); 367 ar9003_paprd_init_table(ah);
@@ -386,6 +379,7 @@ void ath_paprd_calibrate(struct work_struct *work)
386 } 379 }
387 380
388 init_completion(&sc->paprd_complete); 381 init_completion(&sc->paprd_complete);
382 sc->paprd_pending = true;
389 ar9003_paprd_setup_gain_table(ah, chain); 383 ar9003_paprd_setup_gain_table(ah, chain);
390 txctl.paprd = BIT(chain); 384 txctl.paprd = BIT(chain);
391 if (ath_tx_start(hw, skb, &txctl) != 0) 385 if (ath_tx_start(hw, skb, &txctl) != 0)
@@ -393,6 +387,7 @@ void ath_paprd_calibrate(struct work_struct *work)
393 387
394 time_left = wait_for_completion_timeout(&sc->paprd_complete, 388 time_left = wait_for_completion_timeout(&sc->paprd_complete,
395 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); 389 msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
390 sc->paprd_pending = false;
396 if (!time_left) { 391 if (!time_left) {
397 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, 392 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
398 "Timeout waiting for paprd training on " 393 "Timeout waiting for paprd training on "
@@ -566,7 +561,6 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
566 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 561 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
567 sta->ht_cap.ampdu_factor); 562 sta->ht_cap.ampdu_factor);
568 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density); 563 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
569 an->last_rssi = ATH_RSSI_DUMMY_MARKER;
570 } 564 }
571} 565}
572 566
@@ -614,6 +608,8 @@ void ath9k_tasklet(unsigned long data)
614 return; 608 return;
615 } 609 }
616 610
611 spin_lock_bh(&sc->sc_pcu_lock);
612
617 if (!ath9k_hw_check_alive(ah)) 613 if (!ath9k_hw_check_alive(ah))
618 ieee80211_queue_work(sc->hw, &sc->hw_check_work); 614 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
619 615
@@ -624,15 +620,12 @@ void ath9k_tasklet(unsigned long data)
624 rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN); 620 rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
625 621
626 if (status & rxmask) { 622 if (status & rxmask) {
627 spin_lock_bh(&sc->rx.pcu_lock);
628
629 /* Check for high priority Rx first */ 623 /* Check for high priority Rx first */
630 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && 624 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
631 (status & ATH9K_INT_RXHP)) 625 (status & ATH9K_INT_RXHP))
632 ath_rx_tasklet(sc, 0, true); 626 ath_rx_tasklet(sc, 0, true);
633 627
634 ath_rx_tasklet(sc, 0, false); 628 ath_rx_tasklet(sc, 0, false);
635 spin_unlock_bh(&sc->rx.pcu_lock);
636 } 629 }
637 630
638 if (status & ATH9K_INT_TX) { 631 if (status & ATH9K_INT_TX) {
@@ -657,7 +650,9 @@ void ath9k_tasklet(unsigned long data)
657 ath_gen_timer_isr(sc->sc_ah); 650 ath_gen_timer_isr(sc->sc_ah);
658 651
659 /* re-enable hardware interrupt */ 652 /* re-enable hardware interrupt */
660 ath9k_hw_set_interrupts(ah, ah->imask); 653 ath9k_hw_enable_interrupts(ah);
654
655 spin_unlock_bh(&sc->sc_pcu_lock);
661 ath9k_ps_restore(sc); 656 ath9k_ps_restore(sc);
662} 657}
663 658
@@ -756,7 +751,7 @@ irqreturn_t ath_isr(int irq, void *dev)
756 * interrupt; otherwise it will continue to 751 * interrupt; otherwise it will continue to
757 * fire. 752 * fire.
758 */ 753 */
759 ath9k_hw_set_interrupts(ah, 0); 754 ath9k_hw_disable_interrupts(ah);
760 /* 755 /*
761 * Let the hal handle the event. We assume 756 * Let the hal handle the event. We assume
762 * it will clear whatever condition caused 757 * it will clear whatever condition caused
@@ -765,7 +760,7 @@ irqreturn_t ath_isr(int irq, void *dev)
765 spin_lock(&common->cc_lock); 760 spin_lock(&common->cc_lock);
766 ath9k_hw_proc_mib_event(ah); 761 ath9k_hw_proc_mib_event(ah);
767 spin_unlock(&common->cc_lock); 762 spin_unlock(&common->cc_lock);
768 ath9k_hw_set_interrupts(ah, ah->imask); 763 ath9k_hw_enable_interrupts(ah);
769 } 764 }
770 765
771 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 766 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
@@ -782,8 +777,8 @@ chip_reset:
782 ath_debug_stat_interrupt(sc, status); 777 ath_debug_stat_interrupt(sc, status);
783 778
784 if (sched) { 779 if (sched) {
785 /* turn off every interrupt except SWBA */ 780 /* turn off every interrupt */
786 ath9k_hw_set_interrupts(ah, (ah->imask & ATH9K_INT_SWBA)); 781 ath9k_hw_disable_interrupts(ah);
787 tasklet_schedule(&sc->intr_tq); 782 tasklet_schedule(&sc->intr_tq);
788 } 783 }
789 784
@@ -835,9 +830,11 @@ static u32 ath_get_extchanmode(struct ath_softc *sc,
835} 830}
836 831
837static void ath9k_bss_assoc_info(struct ath_softc *sc, 832static void ath9k_bss_assoc_info(struct ath_softc *sc,
833 struct ieee80211_hw *hw,
838 struct ieee80211_vif *vif, 834 struct ieee80211_vif *vif,
839 struct ieee80211_bss_conf *bss_conf) 835 struct ieee80211_bss_conf *bss_conf)
840{ 836{
837 struct ath_wiphy *aphy = hw->priv;
841 struct ath_hw *ah = sc->sc_ah; 838 struct ath_hw *ah = sc->sc_ah;
842 struct ath_common *common = ath9k_hw_common(ah); 839 struct ath_common *common = ath9k_hw_common(ah);
843 840
@@ -861,6 +858,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
861 ath_beacon_config(sc, vif); 858 ath_beacon_config(sc, vif);
862 859
863 /* Reset rssi stats */ 860 /* Reset rssi stats */
861 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
864 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 862 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
865 863
866 sc->sc_flags |= SC_OP_ANI_RUN; 864 sc->sc_flags |= SC_OP_ANI_RUN;
@@ -882,13 +880,13 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
882 int r; 880 int r;
883 881
884 ath9k_ps_wakeup(sc); 882 ath9k_ps_wakeup(sc);
883 spin_lock_bh(&sc->sc_pcu_lock);
884
885 ath9k_hw_configpcipowersave(ah, 0, 0); 885 ath9k_hw_configpcipowersave(ah, 0, 0);
886 886
887 if (!ah->curchan) 887 if (!ah->curchan)
888 ah->curchan = ath_get_curchannel(sc, sc->hw); 888 ah->curchan = ath_get_curchannel(sc, sc->hw);
889 889
890 spin_lock_bh(&sc->rx.pcu_lock);
891 spin_lock_bh(&sc->sc_resetlock);
892 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 890 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
893 if (r) { 891 if (r) {
894 ath_print(common, ATH_DBG_FATAL, 892 ath_print(common, ATH_DBG_FATAL,
@@ -896,17 +894,14 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
896 "reset status %d\n", 894 "reset status %d\n",
897 channel->center_freq, r); 895 channel->center_freq, r);
898 } 896 }
899 spin_unlock_bh(&sc->sc_resetlock);
900 897
901 ath_update_txpow(sc); 898 ath_update_txpow(sc);
902 if (ath_startrecv(sc) != 0) { 899 if (ath_startrecv(sc) != 0) {
903 ath_print(common, ATH_DBG_FATAL, 900 ath_print(common, ATH_DBG_FATAL,
904 "Unable to restart recv logic\n"); 901 "Unable to restart recv logic\n");
905 spin_unlock_bh(&sc->rx.pcu_lock); 902 spin_unlock_bh(&sc->sc_pcu_lock);
906 return; 903 return;
907 } 904 }
908 spin_unlock_bh(&sc->rx.pcu_lock);
909
910 if (sc->sc_flags & SC_OP_BEACONS) 905 if (sc->sc_flags & SC_OP_BEACONS)
911 ath_beacon_config(sc, NULL); /* restart beacons */ 906 ath_beacon_config(sc, NULL); /* restart beacons */
912 907
@@ -919,6 +914,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
919 ath9k_hw_set_gpio(ah, ah->led_pin, 0); 914 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
920 915
921 ieee80211_wake_queues(hw); 916 ieee80211_wake_queues(hw);
917 spin_unlock_bh(&sc->sc_pcu_lock);
918
922 ath9k_ps_restore(sc); 919 ath9k_ps_restore(sc);
923} 920}
924 921
@@ -929,6 +926,8 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
929 int r; 926 int r;
930 927
931 ath9k_ps_wakeup(sc); 928 ath9k_ps_wakeup(sc);
929 spin_lock_bh(&sc->sc_pcu_lock);
930
932 ieee80211_stop_queues(hw); 931 ieee80211_stop_queues(hw);
933 932
934 /* 933 /*
@@ -941,19 +940,16 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
941 } 940 }
942 941
943 /* Disable interrupts */ 942 /* Disable interrupts */
944 ath9k_hw_set_interrupts(ah, 0); 943 ath9k_hw_disable_interrupts(ah);
945 944
946 ath_drain_all_txq(sc, false); /* clear pending tx frames */ 945 ath_drain_all_txq(sc, false); /* clear pending tx frames */
947 946
948 spin_lock_bh(&sc->rx.pcu_lock);
949
950 ath_stoprecv(sc); /* turn off frame recv */ 947 ath_stoprecv(sc); /* turn off frame recv */
951 ath_flushrecv(sc); /* flush recv queue */ 948 ath_flushrecv(sc); /* flush recv queue */
952 949
953 if (!ah->curchan) 950 if (!ah->curchan)
954 ah->curchan = ath_get_curchannel(sc, hw); 951 ah->curchan = ath_get_curchannel(sc, hw);
955 952
956 spin_lock_bh(&sc->sc_resetlock);
957 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 953 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
958 if (r) { 954 if (r) {
959 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 955 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
@@ -961,14 +957,14 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
961 "reset status %d\n", 957 "reset status %d\n",
962 channel->center_freq, r); 958 channel->center_freq, r);
963 } 959 }
964 spin_unlock_bh(&sc->sc_resetlock);
965 960
966 ath9k_hw_phy_disable(ah); 961 ath9k_hw_phy_disable(ah);
967 962
968 spin_unlock_bh(&sc->rx.pcu_lock);
969
970 ath9k_hw_configpcipowersave(ah, 1, 1); 963 ath9k_hw_configpcipowersave(ah, 1, 1);
964
965 spin_unlock_bh(&sc->sc_pcu_lock);
971 ath9k_ps_restore(sc); 966 ath9k_ps_restore(sc);
967
972 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); 968 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
973} 969}
974 970
@@ -982,29 +978,25 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
982 /* Stop ANI */ 978 /* Stop ANI */
983 del_timer_sync(&common->ani.timer); 979 del_timer_sync(&common->ani.timer);
984 980
981 spin_lock_bh(&sc->sc_pcu_lock);
982
985 ieee80211_stop_queues(hw); 983 ieee80211_stop_queues(hw);
986 984
987 ath9k_hw_set_interrupts(ah, 0); 985 ath9k_hw_disable_interrupts(ah);
988 ath_drain_all_txq(sc, retry_tx); 986 ath_drain_all_txq(sc, retry_tx);
989 987
990 spin_lock_bh(&sc->rx.pcu_lock);
991
992 ath_stoprecv(sc); 988 ath_stoprecv(sc);
993 ath_flushrecv(sc); 989 ath_flushrecv(sc);
994 990
995 spin_lock_bh(&sc->sc_resetlock);
996 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false); 991 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
997 if (r) 992 if (r)
998 ath_print(common, ATH_DBG_FATAL, 993 ath_print(common, ATH_DBG_FATAL,
999 "Unable to reset hardware; reset status %d\n", r); 994 "Unable to reset hardware; reset status %d\n", r);
1000 spin_unlock_bh(&sc->sc_resetlock);
1001 995
1002 if (ath_startrecv(sc) != 0) 996 if (ath_startrecv(sc) != 0)
1003 ath_print(common, ATH_DBG_FATAL, 997 ath_print(common, ATH_DBG_FATAL,
1004 "Unable to start recv logic\n"); 998 "Unable to start recv logic\n");
1005 999
1006 spin_unlock_bh(&sc->rx.pcu_lock);
1007
1008 /* 1000 /*
1009 * We may be doing a reset in response to a request 1001 * We may be doing a reset in response to a request
1010 * that changes the channel so update any state that 1002 * that changes the channel so update any state that
@@ -1029,6 +1021,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1029 } 1021 }
1030 1022
1031 ieee80211_wake_queues(hw); 1023 ieee80211_wake_queues(hw);
1024 spin_unlock_bh(&sc->sc_pcu_lock);
1032 1025
1033 /* Start ANI */ 1026 /* Start ANI */
1034 ath_start_ani(common); 1027 ath_start_ani(common);
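Throughout main.c the sc->rx.pcu_lock / sc->sc_resetlock pairs collapse into a single sc->sc_pcu_lock held across the whole stop-RX / reset / start-RX sequence (sc_resetlock's spin_lock_init likewise disappeared from init.c above). The resulting shape, condensed from ath_reset():

    spin_lock_bh(&sc->sc_pcu_lock);

    ath9k_hw_disable_interrupts(ah);
    ath_drain_all_txq(sc, retry_tx);
    ath_stoprecv(sc);
    ath_flushrecv(sc);

    r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
    if (r)
        ath_print(common, ATH_DBG_FATAL,
                  "Unable to reset hardware; reset status %d\n", r);

    if (ath_startrecv(sc) != 0)
        ath_print(common, ATH_DBG_FATAL, "Unable to start recv logic\n");

    spin_unlock_bh(&sc->sc_pcu_lock);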
@@ -1036,56 +1029,6 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1036 return r; 1029 return r;
1037} 1030}
1038 1031
1039static int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1040{
1041 int qnum;
1042
1043 switch (queue) {
1044 case 0:
1045 qnum = sc->tx.hwq_map[WME_AC_VO];
1046 break;
1047 case 1:
1048 qnum = sc->tx.hwq_map[WME_AC_VI];
1049 break;
1050 case 2:
1051 qnum = sc->tx.hwq_map[WME_AC_BE];
1052 break;
1053 case 3:
1054 qnum = sc->tx.hwq_map[WME_AC_BK];
1055 break;
1056 default:
1057 qnum = sc->tx.hwq_map[WME_AC_BE];
1058 break;
1059 }
1060
1061 return qnum;
1062}
1063
1064int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1065{
1066 int qnum;
1067
1068 switch (queue) {
1069 case WME_AC_VO:
1070 qnum = 0;
1071 break;
1072 case WME_AC_VI:
1073 qnum = 1;
1074 break;
1075 case WME_AC_BE:
1076 qnum = 2;
1077 break;
1078 case WME_AC_BK:
1079 qnum = 3;
1080 break;
1081 default:
1082 qnum = -1;
1083 break;
1084 }
1085
1086 return qnum;
1087}
1088
1089/* XXX: Remove me once we don't depend on ath9k_channel for all 1032/* XXX: Remove me once we don't depend on ath9k_channel for all
1090 * this redundant data */ 1033 * this redundant data */
1091void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, 1034void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
@@ -1167,19 +1110,16 @@ static int ath9k_start(struct ieee80211_hw *hw)
1167 * be followed by initialization of the appropriate bits 1110 * be followed by initialization of the appropriate bits
1168 * and then setup of the interrupt mask. 1111 * and then setup of the interrupt mask.
1169 */ 1112 */
1170 spin_lock_bh(&sc->rx.pcu_lock); 1113 spin_lock_bh(&sc->sc_pcu_lock);
1171 spin_lock_bh(&sc->sc_resetlock);
1172 r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); 1114 r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
1173 if (r) { 1115 if (r) {
1174 ath_print(common, ATH_DBG_FATAL, 1116 ath_print(common, ATH_DBG_FATAL,
1175 "Unable to reset hardware; reset status %d " 1117 "Unable to reset hardware; reset status %d "
1176 "(freq %u MHz)\n", r, 1118 "(freq %u MHz)\n", r,
1177 curchan->center_freq); 1119 curchan->center_freq);
1178 spin_unlock_bh(&sc->sc_resetlock); 1120 spin_unlock_bh(&sc->sc_pcu_lock);
1179 spin_unlock_bh(&sc->rx.pcu_lock);
1180 goto mutex_unlock; 1121 goto mutex_unlock;
1181 } 1122 }
1182 spin_unlock_bh(&sc->sc_resetlock);
1183 1123
1184 /* 1124 /*
1185 * This is needed only to setup initial state 1125 * This is needed only to setup initial state
@@ -1198,10 +1138,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
1198 ath_print(common, ATH_DBG_FATAL, 1138 ath_print(common, ATH_DBG_FATAL,
1199 "Unable to start recv logic\n"); 1139 "Unable to start recv logic\n");
1200 r = -EIO; 1140 r = -EIO;
1201 spin_unlock_bh(&sc->rx.pcu_lock); 1141 spin_unlock_bh(&sc->sc_pcu_lock);
1202 goto mutex_unlock; 1142 goto mutex_unlock;
1203 } 1143 }
1204 spin_unlock_bh(&sc->rx.pcu_lock); 1144 spin_unlock_bh(&sc->sc_pcu_lock);
1205 1145
1206 /* Setup our intr mask. */ 1146 /* Setup our intr mask. */
1207 ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL | 1147 ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
@@ -1254,14 +1194,11 @@ mutex_unlock:
1254static int ath9k_tx(struct ieee80211_hw *hw, 1194static int ath9k_tx(struct ieee80211_hw *hw,
1255 struct sk_buff *skb) 1195 struct sk_buff *skb)
1256{ 1196{
1257 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1258 struct ath_wiphy *aphy = hw->priv; 1197 struct ath_wiphy *aphy = hw->priv;
1259 struct ath_softc *sc = aphy->sc; 1198 struct ath_softc *sc = aphy->sc;
1260 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1199 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1261 struct ath_tx_control txctl; 1200 struct ath_tx_control txctl;
1262 int padpos, padsize;
1263 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1201 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1264 int qnum;
1265 1202
1266 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) { 1203 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
1267 ath_print(common, ATH_DBG_XMIT, 1204 ath_print(common, ATH_DBG_XMIT,
@@ -1311,31 +1248,7 @@ static int ath9k_tx(struct ieee80211_hw *hw,
1311 } 1248 }
1312 1249
1313 memset(&txctl, 0, sizeof(struct ath_tx_control)); 1250 memset(&txctl, 0, sizeof(struct ath_tx_control));
1314 1251 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
1315 /*
1316 * As a temporary workaround, assign seq# here; this will likely need
1317 * to be cleaned up to work better with Beacon transmission and virtual
1318 * BSSes.
1319 */
1320 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1321 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1322 sc->tx.seq_no += 0x10;
1323 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1324 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1325 }
1326
1327 /* Add the padding after the header if this is not already done */
1328 padpos = ath9k_cmn_padpos(hdr->frame_control);
1329 padsize = padpos & 3;
1330 if (padsize && skb->len>padpos) {
1331 if (skb_headroom(skb) < padsize)
1332 return -1;
1333 skb_push(skb, padsize);
1334 memmove(skb->data, skb->data + padsize, padpos);
1335 }
1336
1337 qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
1338 txctl.txq = &sc->tx.txq[qnum];
1339 1252
1340 ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb); 1253 ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
1341 1254
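ath9k_tx() drops its local sequence-number and header-padding workaround (presumably handled further down the TX path after this series) and picks the target queue with one array lookup. Before/after of the queue selection, with the implicit assumption spelled out:

    /* Old: translate twice (mac80211 queue -> WME AC -> hw queue number). */
    qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
    txctl.txq = &sc->tx.txq[qnum];

    /* New: index the per-AC table directly.  This relies on the driver's
     * WME_AC_* values following mac80211's queue order (VO=0, VI=1, BE=2,
     * BK=3), exactly the mapping the removed ath_get_mac80211_qnum()
     * spelled out. */
    txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];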
@@ -1399,22 +1312,25 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1399 ath9k_btcoex_timer_pause(sc); 1312 ath9k_btcoex_timer_pause(sc);
1400 } 1313 }
1401 1314
1315 spin_lock_bh(&sc->sc_pcu_lock);
1316
1402 /* make sure h/w will not generate any interrupt 1317 /* make sure h/w will not generate any interrupt
1403 * before setting the invalid flag. */ 1318 * before setting the invalid flag. */
1404 ath9k_hw_set_interrupts(ah, 0); 1319 ath9k_hw_disable_interrupts(ah);
1405 1320
1406 spin_lock_bh(&sc->rx.pcu_lock);
1407 if (!(sc->sc_flags & SC_OP_INVALID)) { 1321 if (!(sc->sc_flags & SC_OP_INVALID)) {
1408 ath_drain_all_txq(sc, false); 1322 ath_drain_all_txq(sc, false);
1409 ath_stoprecv(sc); 1323 ath_stoprecv(sc);
1410 ath9k_hw_phy_disable(ah); 1324 ath9k_hw_phy_disable(ah);
1411 } else 1325 } else
1412 sc->rx.rxlink = NULL; 1326 sc->rx.rxlink = NULL;
1413 spin_unlock_bh(&sc->rx.pcu_lock);
1414 1327
1415 /* disable HAL and put h/w to sleep */ 1328 /* disable HAL and put h/w to sleep */
1416 ath9k_hw_disable(ah); 1329 ath9k_hw_disable(ah);
1417 ath9k_hw_configpcipowersave(ah, 1, 1); 1330 ath9k_hw_configpcipowersave(ah, 1, 1);
1331
1332 spin_unlock_bh(&sc->sc_pcu_lock);
1333
1418 ath9k_ps_restore(sc); 1334 ath9k_ps_restore(sc);
1419 1335
1420 /* Finally, put the chip in FULL SLEEP mode */ 1336 /* Finally, put the chip in FULL SLEEP mode */
@@ -1830,12 +1746,15 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1830 struct ath_wiphy *aphy = hw->priv; 1746 struct ath_wiphy *aphy = hw->priv;
1831 struct ath_softc *sc = aphy->sc; 1747 struct ath_softc *sc = aphy->sc;
1832 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1748 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1749 struct ath_txq *txq;
1833 struct ath9k_tx_queue_info qi; 1750 struct ath9k_tx_queue_info qi;
1834 int ret = 0, qnum; 1751 int ret = 0;
1835 1752
1836 if (queue >= WME_NUM_AC) 1753 if (queue >= WME_NUM_AC)
1837 return 0; 1754 return 0;
1838 1755
1756 txq = sc->tx.txq_map[queue];
1757
1839 mutex_lock(&sc->mutex); 1758 mutex_lock(&sc->mutex);
1840 1759
1841 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); 1760 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1844,20 +1763,19 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1844 qi.tqi_cwmin = params->cw_min; 1763 qi.tqi_cwmin = params->cw_min;
1845 qi.tqi_cwmax = params->cw_max; 1764 qi.tqi_cwmax = params->cw_max;
1846 qi.tqi_burstTime = params->txop; 1765 qi.tqi_burstTime = params->txop;
1847 qnum = ath_get_hal_qnum(queue, sc);
1848 1766
1849 ath_print(common, ATH_DBG_CONFIG, 1767 ath_print(common, ATH_DBG_CONFIG,
1850 "Configure tx [queue/halq] [%d/%d], " 1768 "Configure tx [queue/halq] [%d/%d], "
1851 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", 1769 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
1852 queue, qnum, params->aifs, params->cw_min, 1770 queue, txq->axq_qnum, params->aifs, params->cw_min,
1853 params->cw_max, params->txop); 1771 params->cw_max, params->txop);
1854 1772
1855 ret = ath_txq_update(sc, qnum, &qi); 1773 ret = ath_txq_update(sc, txq->axq_qnum, &qi);
1856 if (ret) 1774 if (ret)
1857 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n"); 1775 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
1858 1776
1859 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) 1777 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
1860 if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret) 1778 if (queue == WME_AC_BE && !ret)
1861 ath_beaconq_config(sc); 1779 ath_beaconq_config(sc);
1862 1780
1863 mutex_unlock(&sc->mutex); 1781 mutex_unlock(&sc->mutex);
@@ -2019,7 +1937,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2019 if (changed & BSS_CHANGED_ASSOC) { 1937 if (changed & BSS_CHANGED_ASSOC) {
2020 ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", 1938 ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
2021 bss_conf->assoc); 1939 bss_conf->assoc);
2022 ath9k_bss_assoc_info(sc, vif, bss_conf); 1940 ath9k_bss_assoc_info(sc, hw, vif, bss_conf);
2023 } 1941 }
2024 1942
2025 mutex_unlock(&sc->mutex); 1943 mutex_unlock(&sc->mutex);
@@ -2082,6 +2000,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2082 case IEEE80211_AMPDU_RX_STOP: 2000 case IEEE80211_AMPDU_RX_STOP:
2083 break; 2001 break;
2084 case IEEE80211_AMPDU_TX_START: 2002 case IEEE80211_AMPDU_TX_START:
2003 if (!(sc->sc_flags & SC_OP_TXAGGR))
2004 return -EOPNOTSUPP;
2005
2085 ath9k_ps_wakeup(sc); 2006 ath9k_ps_wakeup(sc);
2086 ret = ath_tx_aggr_start(sc, sta, tid, ssn); 2007 ret = ath_tx_aggr_start(sc, sta, tid, ssn);
2087 if (!ret) 2008 if (!ret)
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index b5b651413e7..09f69a9617f 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/ath9k_platform.h>
19#include "ath9k.h" 20#include "ath9k.h"
20 21
21static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { 22static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
@@ -53,21 +54,36 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
53 54
54static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data) 55static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
55{ 56{
56 struct ath_hw *ah = (struct ath_hw *) common->ah; 57 struct ath_softc *sc = (struct ath_softc *) common->priv;
57 58 struct ath9k_platform_data *pdata = sc->dev->platform_data;
58 common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S)); 59
59 60 if (pdata) {
60 if (!ath9k_hw_wait(ah, 61 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
61 AR_EEPROM_STATUS_DATA, 62 ath_print(common, ATH_DBG_FATAL,
62 AR_EEPROM_STATUS_DATA_BUSY | 63 "%s: eeprom read failed, offset %08x "
63 AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0, 64 "is out of range\n",
64 AH_WAIT_TIMEOUT)) { 65 __func__, off);
65 return false; 66 }
67
68 *data = pdata->eeprom_data[off];
69 } else {
70 struct ath_hw *ah = (struct ath_hw *) common->ah;
71
72 common->ops->read(ah, AR5416_EEPROM_OFFSET +
73 (off << AR5416_EEPROM_S));
74
75 if (!ath9k_hw_wait(ah,
76 AR_EEPROM_STATUS_DATA,
77 AR_EEPROM_STATUS_DATA_BUSY |
78 AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
79 AH_WAIT_TIMEOUT)) {
80 return false;
81 }
82
83 *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
84 AR_EEPROM_STATUS_DATA_VAL);
66 } 85 }
67 86
68 *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
69 AR_EEPROM_STATUS_DATA_VAL);
70
71 return true; 87 return true;
72} 88}
73 89
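ath_pci_eeprom_read() now serves EEPROM words from struct ath9k_platform_data when the device carries platform data, falling back to the chip's EEPROM registers otherwise; together with the init.c hunk earlier, AH_USE_EEPROM is only set when no platform data is present. A hedged sketch of what a board-support file might supply -- only the eeprom_data field name is taken from this diff; the array bound, values and attachment path are illustrative assumptions:

    #include <linux/ath9k_platform.h>

    /* Calibration data recovered from e.g. a flash partition.  The array is
     * sized by the header (assumed ATH9K_PLAT_EEP_MAX_WORDS); only a couple
     * of made-up words are shown. */
    static struct ath9k_platform_data example_ath9k_pdata = {
        .eeprom_data = { 0x5aa5, 0x0000 /* ... device-specific words ... */ },
    };

    /* Platform glue would attach this as dev->platform_data before the ath9k
     * probe runs; ath_pci_eeprom_read() then returns pdata->eeprom_data[off]
     * without touching the EEPROM hardware. */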
@@ -247,34 +263,25 @@ static void ath_pci_remove(struct pci_dev *pdev)
247 263
248#ifdef CONFIG_PM 264#ifdef CONFIG_PM
249 265
250static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state) 266static int ath_pci_suspend(struct device *device)
251{ 267{
268 struct pci_dev *pdev = to_pci_dev(device);
252 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 269 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
253 struct ath_wiphy *aphy = hw->priv; 270 struct ath_wiphy *aphy = hw->priv;
254 struct ath_softc *sc = aphy->sc; 271 struct ath_softc *sc = aphy->sc;
255 272
256 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); 273 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
257 274
258 pci_save_state(pdev);
259 pci_disable_device(pdev);
260 pci_set_power_state(pdev, PCI_D3hot);
261
262 return 0; 275 return 0;
263} 276}
264 277
265static int ath_pci_resume(struct pci_dev *pdev) 278static int ath_pci_resume(struct device *device)
266{ 279{
280 struct pci_dev *pdev = to_pci_dev(device);
267 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 281 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
268 struct ath_wiphy *aphy = hw->priv; 282 struct ath_wiphy *aphy = hw->priv;
269 struct ath_softc *sc = aphy->sc; 283 struct ath_softc *sc = aphy->sc;
270 u32 val; 284 u32 val;
271 int err;
272
273 pci_restore_state(pdev);
274
275 err = pci_enable_device(pdev);
276 if (err)
277 return err;
278 285
279 /* 286 /*
280 * Suspend/Resume resets the PCI configuration space, so we have to 287 * Suspend/Resume resets the PCI configuration space, so we have to
@@ -293,7 +300,23 @@ static int ath_pci_resume(struct pci_dev *pdev)
293 return 0; 300 return 0;
294} 301}
295 302
296#endif /* CONFIG_PM */ 303static const struct dev_pm_ops ath9k_pm_ops = {
304 .suspend = ath_pci_suspend,
305 .resume = ath_pci_resume,
306 .freeze = ath_pci_suspend,
307 .thaw = ath_pci_resume,
308 .poweroff = ath_pci_suspend,
309 .restore = ath_pci_resume,
310};
311
312#define ATH9K_PM_OPS (&ath9k_pm_ops)
313
314#else /* !CONFIG_PM */
315
316#define ATH9K_PM_OPS NULL
317
318#endif /* !CONFIG_PM */
319
297 320
298MODULE_DEVICE_TABLE(pci, ath_pci_id_table); 321MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
299 322
@@ -302,10 +325,7 @@ static struct pci_driver ath_pci_driver = {
302 .id_table = ath_pci_id_table, 325 .id_table = ath_pci_id_table,
303 .probe = ath_pci_probe, 326 .probe = ath_pci_probe,
304 .remove = ath_pci_remove, 327 .remove = ath_pci_remove,
305#ifdef CONFIG_PM 328 .driver.pm = ATH9K_PM_OPS,
306 .suspend = ath_pci_suspend,
307 .resume = ath_pci_resume,
308#endif /* CONFIG_PM */
309}; 329};
310 330
311int ath_pci_init(void) 331int ath_pci_init(void)
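The legacy pci_driver .suspend/.resume hooks give way to a dev_pm_ops table hung off .driver.pm, which is why the manual pci_save_state()/pci_set_power_state()/pci_enable_device() calls drop out of the callbacks: the PCI PM core performs those steps itself around the new dev_pm_ops entry points. On kernels that provide it, SIMPLE_DEV_PM_OPS() generates the same six-entry table; a condensed sketch under that assumption:

    #include <linux/pci.h>
    #include <linux/pm.h>

    #ifdef CONFIG_PM
    /* Routes suspend/resume/freeze/thaw/poweroff/restore to the two
     * callbacks, i.e. the table the patch writes out by hand. */
    static SIMPLE_DEV_PM_OPS(ath9k_pm_ops, ath_pci_suspend, ath_pci_resume);
    #define ATH9K_PM_OPS    (&ath9k_pm_ops)
    #else
    #define ATH9K_PM_OPS    NULL
    #endif

    /* Condensed registration; remaining fields as in the real driver. */
    static struct pci_driver example_pci_driver = {
        .id_table  = ath_pci_id_table,
        .probe     = ath_pci_probe,
        .remove    = ath_pci_remove,
        .driver.pm = ATH9K_PM_OPS,
    };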
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 89978d71617..3e6ea3bc3d8 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -381,25 +381,6 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
381static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table, 381static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
382 struct ieee80211_tx_rate *rate); 382 struct ieee80211_tx_rate *rate);
383 383
384static inline int8_t median(int8_t a, int8_t b, int8_t c)
385{
386 if (a >= b) {
387 if (b >= c)
388 return b;
389 else if (a > c)
390 return c;
391 else
392 return a;
393 } else {
394 if (a >= c)
395 return a;
396 else if (b >= c)
397 return c;
398 else
399 return b;
400 }
401}
402
403static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table, 384static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
404 struct ath_rate_priv *ath_rc_priv) 385 struct ath_rate_priv *ath_rc_priv)
405{ 386{
@@ -883,7 +864,7 @@ static bool ath_rc_update_per(struct ath_softc *sc,
883 bool state_change = false; 864 bool state_change = false;
884 int count, n_bad_frames; 865 int count, n_bad_frames;
885 u8 last_per; 866 u8 last_per;
886 static u32 nretry_to_per_lookup[10] = { 867 static const u32 nretry_to_per_lookup[10] = {
887 100 * 0 / 1, 868 100 * 0 / 1,
888 100 * 1 / 4, 869 100 * 1 / 4,
889 100 * 1 / 2, 870 100 * 1 / 2,
@@ -1106,13 +1087,13 @@ static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
1106 struct ieee80211_tx_rate *rate) 1087 struct ieee80211_tx_rate *rate)
1107{ 1088{
1108 int rix = 0, i = 0; 1089 int rix = 0, i = 0;
1109 int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 }; 1090 static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
1110 1091
1111 if (!(rate->flags & IEEE80211_TX_RC_MCS)) 1092 if (!(rate->flags & IEEE80211_TX_RC_MCS))
1112 return rate->idx; 1093 return rate->idx;
1113 1094
1114 while (rate->idx > mcs_rix_off[i] && 1095 while (rate->idx > mcs_rix_off[i] &&
1115 i < sizeof(mcs_rix_off)/sizeof(int)) { 1096 i < ARRAY_SIZE(mcs_rix_off)) {
1116 rix++; i++; 1097 rix++; i++;
1117 } 1098 }
1118 1099
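rc.c converts its lookup tables to static const and replaces the hand-written sizeof(mcs_rix_off)/sizeof(int) bound with ARRAY_SIZE(). A stand-alone illustration of why ARRAY_SIZE() is the safer spelling:

    #include <linux/kernel.h>    /* ARRAY_SIZE() */

    static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };

    static int count_entries(void)
    {
        int i, n = 0;

        /* ARRAY_SIZE(x) expands to sizeof(x) / sizeof((x)[0]), derived from
         * the array itself, so it cannot drift if the element type changes,
         * unlike the old sizeof(mcs_rix_off)/sizeof(int). */
        for (i = 0; i < ARRAY_SIZE(mcs_rix_off); i++)
            n++;

        return n;    /* 6 */
    }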
@@ -1373,23 +1354,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1373 tx_info->status.ampdu_len = 1; 1354 tx_info->status.ampdu_len = 1;
1374 } 1355 }
1375 1356
1376 /* 1357 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
1377 * If an underrun error is seen assume it as an excessive retry only
1378 * if max frame trigger level has been reached (2 KB for singel stream,
1379 * and 4 KB for dual stream). Adjust the long retry as if the frame was
1380 * tried hw->max_rate_tries times to affect how ratectrl updates PER for
1381 * the failed rate. In case of congestion on the bus penalizing these
1382 * type of underruns should help hardware actually transmit new frames
1383 * successfully by eventually preferring slower rates. This itself
1384 * should also alleviate congestion on the bus.
1385 */
1386 if ((tx_info->pad[0] & ATH_TX_INFO_UNDERRUN) &&
1387 (sc->sc_ah->tx_trig_level >= ath_rc_priv->tx_triglevel_max)) {
1388 tx_status = 1;
1389 is_underrun = 1;
1390 }
1391
1392 if (tx_info->pad[0] & ATH_TX_INFO_XRETRY)
1393 tx_status = 1; 1358 tx_status = 1;
1394 1359
1395 ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status, 1360 ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
@@ -1398,7 +1363,8 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1398 /* Check if aggregation has to be enabled for this tid */ 1363 /* Check if aggregation has to be enabled for this tid */
1399 if (conf_is_ht(&sc->hw->conf) && 1364 if (conf_is_ht(&sc->hw->conf) &&
1400 !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { 1365 !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
1401 if (ieee80211_is_data_qos(fc)) { 1366 if (ieee80211_is_data_qos(fc) &&
1367 skb_get_queue_mapping(skb) != IEEE80211_AC_VO) {
1402 u8 *qc, tid; 1368 u8 *qc, tid;
1403 struct ath_node *an; 1369 struct ath_node *an;
1404 1370
@@ -1444,12 +1410,12 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1444 ath_rc_priv->neg_ht_rates.rs_nrates = j; 1410 ath_rc_priv->neg_ht_rates.rs_nrates = j;
1445 } 1411 }
1446 1412
1447 is_cw40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; 1413 is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
1448 1414
1449 if (is_cw40) 1415 if (is_cw40)
1450 is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; 1416 is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
1451 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20) 1417 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
1452 is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20; 1418 is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
1453 1419
1454 /* Choose rate table first */ 1420 /* Choose rate table first */
1455 1421
@@ -1468,10 +1434,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1468 struct ath_rate_priv *ath_rc_priv = priv_sta; 1434 struct ath_rate_priv *ath_rc_priv = priv_sta;
1469 const struct ath_rate_table *rate_table = NULL; 1435 const struct ath_rate_table *rate_table = NULL;
1470 bool oper_cw40 = false, oper_sgi; 1436 bool oper_cw40 = false, oper_sgi;
1471 bool local_cw40 = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG) ? 1437 bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
1472 true : false; 1438 bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG);
1473 bool local_sgi = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ?
1474 true : false;
1475 1439
1476 /* FIXME: Handle AP mode later when we support CWM */ 1440 /* FIXME: Handle AP mode later when we support CWM */
1477 1441
@@ -1617,8 +1581,6 @@ static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp
1617 return NULL; 1581 return NULL;
1618 } 1582 }
1619 1583
1620 rate_priv->tx_triglevel_max = sc->sc_ah->caps.tx_triglevel_max;
1621
1622 return rate_priv; 1584 return rate_priv;
1623} 1585}
1624 1586
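
The rc.c hunks above replace an open-coded sizeof(arr)/sizeof(int) with ARRAY_SIZE() and make the rate lookup tables static const so they live in read-only data instead of being rebuilt on the stack. A minimal, self-contained userspace sketch of the same idiom follows; it is not the driver's code, and unlike the patched loop it deliberately checks the bound before indexing the array.

#include <stdio.h>

/* Element count independent of element type; evaluated at compile time. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* Lookup table in .rodata instead of a per-call stack copy. */
static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };

static int rate_index_offset(int idx)
{
        size_t i = 0;
        int rix = 0;

        while (i < ARRAY_SIZE(mcs_rix_off) && idx > mcs_rix_off[i]) {
                rix++;
                i++;
        }
        return rix;
}

int main(void)
{
        printf("offset for idx 16: %d\n", rate_index_offset(16));
        return 0;
}

The macro also survives changes to the element type, which the hard-coded sizeof(int) divisor would not.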
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 2f46a2266ba..31a004cb60a 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -215,7 +215,6 @@ struct ath_rate_priv {
215 u32 per_down_time; 215 u32 per_down_time;
216 u32 probe_interval; 216 u32 probe_interval;
217 u32 prev_data_rix; 217 u32 prev_data_rix;
218 u32 tx_triglevel_max;
219 struct ath_rateset neg_rates; 218 struct ath_rateset neg_rates;
220 struct ath_rateset neg_ht_rates; 219 struct ath_rateset neg_ht_rates;
221 struct ath_rate_softc *asc; 220 struct ath_rate_softc *asc;
@@ -225,11 +224,6 @@ struct ath_rate_priv {
225 struct ath_rc_stats rcstats[RATE_TABLE_SIZE]; 224 struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
226}; 225};
227 226
228#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0)
229#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1)
230#define ATH_TX_INFO_XRETRY (1 << 3)
231#define ATH_TX_INFO_UNDERRUN (1 << 4)
232
233enum ath9k_internal_frame_type { 227enum ath9k_internal_frame_type {
234 ATH9K_IFT_NOT_INTERNAL, 228 ATH9K_IFT_NOT_INTERNAL,
235 ATH9K_IFT_PAUSE, 229 ATH9K_IFT_PAUSE,
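
The ath_rate_init() and ath_rate_update() hunks in rc.c above convert expressions of the form (cap & FLAG) ? true : false, and bare mask assignments, into !!(cap & FLAG). The sketch below uses a made-up flag value to show what the double negation buys: any nonzero mask result collapses to exactly 1, which matters when the destination is a plain u8 or int rather than a C99 bool, and documents intent either way.

#include <stdio.h>

#define CAP_SHORT_GI_40 0x0100  /* made-up flag sitting above bit 7 */

int main(void)
{
        unsigned int cap = CAP_SHORT_GI_40;
        unsigned char sgi_raw = cap & CAP_SHORT_GI_40;          /* 0x100 truncated to 0 */
        unsigned char sgi_norm = !!(cap & CAP_SHORT_GI_40);     /* any nonzero -> exactly 1 */

        printf("raw: %u, normalized: %u\n", (unsigned)sgi_raw, (unsigned)sgi_norm);
        return 0;
}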
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 1a62e351ec7..262c81595f6 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -317,7 +317,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
317 struct ath_buf *bf; 317 struct ath_buf *bf;
318 int error = 0; 318 int error = 0;
319 319
320 spin_lock_init(&sc->rx.pcu_lock); 320 spin_lock_init(&sc->sc_pcu_lock);
321 sc->sc_flags &= ~SC_OP_RXFLUSH; 321 sc->sc_flags &= ~SC_OP_RXFLUSH;
322 spin_lock_init(&sc->rx.rxbuflock); 322 spin_lock_init(&sc->rx.rxbuflock);
323 323
@@ -528,6 +528,8 @@ bool ath_stoprecv(struct ath_softc *sc)
528 sc->rx.rxlink = NULL; 528 sc->rx.rxlink = NULL;
529 spin_unlock_bh(&sc->rx.rxbuflock); 529 spin_unlock_bh(&sc->rx.rxbuflock);
530 530
531 ATH_DBG_WARN(!stopped, "Could not stop RX, we could be "
532 "confusing the DMA engine when we start RX up\n");
531 return stopped; 533 return stopped;
532} 534}
533 535
@@ -962,36 +964,23 @@ static void ath9k_process_rssi(struct ath_common *common,
962 struct ieee80211_hdr *hdr, 964 struct ieee80211_hdr *hdr,
963 struct ath_rx_status *rx_stats) 965 struct ath_rx_status *rx_stats)
964{ 966{
967 struct ath_wiphy *aphy = hw->priv;
965 struct ath_hw *ah = common->ah; 968 struct ath_hw *ah = common->ah;
966 struct ieee80211_sta *sta; 969 int last_rssi;
967 struct ath_node *an;
968 int last_rssi = ATH_RSSI_DUMMY_MARKER;
969 __le16 fc; 970 __le16 fc;
970 971
972 if (ah->opmode != NL80211_IFTYPE_STATION)
973 return;
974
971 fc = hdr->frame_control; 975 fc = hdr->frame_control;
976 if (!ieee80211_is_beacon(fc) ||
977 compare_ether_addr(hdr->addr3, common->curbssid))
978 return;
972 979
973 rcu_read_lock(); 980 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
974 /* 981 ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi);
975 * XXX: use ieee80211_find_sta! This requires quite a bit of work
976 * under the current ath9k virtual wiphy implementation as we have
977 * no way of tying a vif to wiphy. Typically vifs are attached to
978 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
979 * wiphy you'd have to iterate over every wiphy and each sdata.
980 */
981 if (is_multicast_ether_addr(hdr->addr1))
982 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
983 else
984 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, hdr->addr1);
985
986 if (sta) {
987 an = (struct ath_node *) sta->drv_priv;
988 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
989 !rx_stats->rs_moreaggr)
990 ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
991 last_rssi = an->last_rssi;
992 }
993 rcu_read_unlock();
994 982
983 last_rssi = aphy->last_rssi;
995 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 984 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
996 rx_stats->rs_rssi = ATH_EP_RND(last_rssi, 985 rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
997 ATH_RSSI_EP_MULTIPLIER); 986 ATH_RSSI_EP_MULTIPLIER);
@@ -999,8 +988,7 @@ static void ath9k_process_rssi(struct ath_common *common,
999 rx_stats->rs_rssi = 0; 988 rx_stats->rs_rssi = 0;
1000 989
1001 /* Update Beacon RSSI, this is used by ANI. */ 990 /* Update Beacon RSSI, this is used by ANI. */
1002 if (ieee80211_is_beacon(fc)) 991 ah->stats.avgbrssi = rx_stats->rs_rssi;
1003 ah->stats.avgbrssi = rx_stats->rs_rssi;
1004} 992}
1005 993
1006/* 994/*
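
The recv.c hunk above moves beacon RSSI smoothing from the per-station ath_node to the per-wiphy aphy->last_rssi, still feeding samples through ATH_RSSI_LPF() and rounding the running value with ATH_EP_RND(). The sketch below shows only the general shape of such an integer low-pass filter; the constants (multiplier, filter length, the "no samples yet" sentinel) are placeholders, and the real macros differ in detail.

#include <stdio.h>

#define RSSI_EP_MULTIPLIER 16     /* fixed-point scale, in the spirit of ATH_RSSI_EP_MULTIPLIER */
#define RSSI_LPF_LEN       10     /* filter length: larger = smoother, slower to react */
#define RSSI_NO_SAMPLES    (-1000) /* sentinel meaning "filter not seeded yet" */

/* Integer exponential moving average kept in scaled units. */
static int rssi_lpf(int avg_scaled, int sample)
{
        int sample_scaled = sample * RSSI_EP_MULTIPLIER;

        if (avg_scaled == RSSI_NO_SAMPLES)      /* first sample seeds the filter */
                return sample_scaled;
        return ((avg_scaled * (RSSI_LPF_LEN - 1)) + sample_scaled) / RSSI_LPF_LEN;
}

/* Round back to whole units when reporting, cf. ATH_EP_RND(). */
static int rssi_report(int avg_scaled)
{
        return (avg_scaled + RSSI_EP_MULTIPLIER / 2) / RSSI_EP_MULTIPLIER;
}

int main(void)
{
        int avg = RSSI_NO_SAMPLES;
        int samples[] = { 35, 40, 38, 90, 37 };  /* the 90 is a spike the filter damps */
        size_t i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                avg = rssi_lpf(avg, samples[i]);
                printf("sample %d -> smoothed %d\n", samples[i], rssi_report(avg));
        }
        return 0;
}

Outliers such as the fourth sample pull the running average only slightly instead of replacing it, which is the point of filtering the per-beacon readings before ANI consumes them.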
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 2c6a22fbb0f..c2472edab5e 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -864,15 +864,7 @@
864 ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1)) 864 ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
865 865
866#define AR_DEVID_7010(_ah) \ 866#define AR_DEVID_7010(_ah) \
867 (((_ah)->hw_version.devid == 0x7010) || \ 867 ((_ah)->common.driver_info & AR7010_DEVICE)
868 ((_ah)->hw_version.devid == 0x7015) || \
869 ((_ah)->hw_version.devid == 0x9018) || \
870 ((_ah)->hw_version.devid == 0xA704) || \
871 ((_ah)->hw_version.devid == 0x1200))
872
873#define AR9287_HTC_DEVID(_ah) \
874 (((_ah)->hw_version.devid == 0x7015) || \
875 ((_ah)->hw_version.devid == 0x1200))
876 868
877#define AR_RADIO_SREV_MAJOR 0xf0 869#define AR_RADIO_SREV_MAJOR 0xf0
878#define AR_RAD5133_SREV_MAJOR 0xc0 870#define AR_RAD5133_SREV_MAJOR 0xc0
@@ -1074,6 +1066,9 @@ enum {
1074#define AR_INTR_PRIO_ASYNC_MASK 0x40c8 1066#define AR_INTR_PRIO_ASYNC_MASK 0x40c8
1075#define AR_INTR_PRIO_SYNC_MASK 0x40cc 1067#define AR_INTR_PRIO_SYNC_MASK 0x40cc
1076#define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4 1068#define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4
1069#define AR_ENT_OTP 0x40d8
1070#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
1071#define AR_ENT_OTP_MPSD 0x00800000
1077 1072
1078#define AR_RTC_9300_PLL_DIV 0x000003ff 1073#define AR_RTC_9300_PLL_DIV 0x000003ff
1079#define AR_RTC_9300_PLL_DIV_S 0 1074#define AR_RTC_9300_PLL_DIV_S 0
@@ -1574,6 +1569,7 @@ enum {
1574#define AR_PCU_TBTT_PROTECT 0x00200000 1569#define AR_PCU_TBTT_PROTECT 0x00200000
1575#define AR_PCU_CLEAR_VMF 0x01000000 1570#define AR_PCU_CLEAR_VMF 0x01000000
1576#define AR_PCU_CLEAR_BA_VALID 0x04000000 1571#define AR_PCU_CLEAR_BA_VALID 0x04000000
1572#define AR_PCU_ALWAYS_PERFORM_KEYSEARCH 0x10000000
1577 1573
1578#define AR_PCU_BT_ANT_PREVENT_RX 0x00100000 1574#define AR_PCU_BT_ANT_PREVENT_RX 0x00100000
1579#define AR_PCU_BT_ANT_PREVENT_RX_S 20 1575#define AR_PCU_BT_ANT_PREVENT_RX_S 20
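
The reg.h hunk above collapses AR_DEVID_7010(), previously a chain of device-ID comparisons, into a single capability-bit test on common.driver_info. A standalone sketch of that pattern follows; struct drv_common and DRV_AR7010_DEVICE are stand-ins, while the device-ID list is taken from the removed macro. Classification happens once at probe time, so hot paths pay only a bit test.

#include <stdbool.h>
#include <stdio.h>

#define DRV_AR7010_DEVICE 0x0001  /* capability bit, set once at probe time */

struct drv_common {
        unsigned int driver_info;
};

/* Probe-time classification: translate device IDs into capability bits once. */
static void classify_devid(struct drv_common *common, unsigned short devid)
{
        static const unsigned short ar7010_ids[] = { 0x7010, 0x7015, 0x9018, 0xA704, 0x1200 };
        size_t i;

        for (i = 0; i < sizeof(ar7010_ids) / sizeof(ar7010_ids[0]); i++)
                if (devid == ar7010_ids[i])
                        common->driver_info |= DRV_AR7010_DEVICE;
}

/* The hot-path check becomes a single bit test instead of a compare chain. */
static bool is_ar7010(const struct drv_common *common)
{
        return common->driver_info & DRV_AR7010_DEVICE;
}

int main(void)
{
        struct drv_common c = { 0 };

        classify_devid(&c, 0x7015);
        printf("AR7010-class device: %s\n", is_ar7010(&c) ? "yes" : "no");
        return 0;
}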
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index ec7cf5ee56b..d5442c3745c 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -107,6 +107,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
107 aphy->sc = sc; 107 aphy->sc = sc;
108 aphy->hw = hw; 108 aphy->hw = hw;
109 sc->sec_wiphy[i] = aphy; 109 sc->sec_wiphy[i] = aphy;
110 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
110 spin_unlock_bh(&sc->wiphy_lock); 111 spin_unlock_bh(&sc->wiphy_lock);
111 112
112 memcpy(addr, common->macaddr, ETH_ALEN); 113 memcpy(addr, common->macaddr, ETH_ALEN);
@@ -186,7 +187,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
186 info->control.rates[1].idx = -1; 187 info->control.rates[1].idx = -1;
187 188
188 memset(&txctl, 0, sizeof(struct ath_tx_control)); 189 memset(&txctl, 0, sizeof(struct ath_tx_control));
189 txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]]; 190 txctl.txq = sc->tx.txq_map[WME_AC_VO];
190 txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE; 191 txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
191 192
192 if (ath_tx_start(aphy->hw, skb, &txctl) != 0) 193 if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
@@ -304,13 +305,12 @@ void ath9k_wiphy_chan_work(struct work_struct *work)
304 * ath9k version of ieee80211_tx_status() for TX frames that are generated 305 * ath9k version of ieee80211_tx_status() for TX frames that are generated
305 * internally in the driver. 306 * internally in the driver.
306 */ 307 */
307void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) 308void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype)
308{ 309{
309 struct ath_wiphy *aphy = hw->priv; 310 struct ath_wiphy *aphy = hw->priv;
310 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 311 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
311 312
312 if ((tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_PAUSE) && 313 if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) {
313 aphy->state == ATH_WIPHY_PAUSING) {
314 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) { 314 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
315 printk(KERN_DEBUG "ath9k: %s: no ACK for pause " 315 printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
316 "frame\n", wiphy_name(hw->wiphy)); 316 "frame\n", wiphy_name(hw->wiphy));
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f2ade2402ce..495432ec85a 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -48,19 +48,17 @@ static u16 bits_per_symbol[][2] = {
48 48
49#define IS_HT_RATE(_rate) ((_rate) & 0x80) 49#define IS_HT_RATE(_rate) ((_rate) & 0x80)
50 50
51static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq, 51static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
52 struct ath_atx_tid *tid, 52 struct ath_atx_tid *tid,
53 struct list_head *bf_head); 53 struct list_head *bf_head);
54static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 54static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55 struct ath_txq *txq, struct list_head *bf_q, 55 struct ath_txq *txq, struct list_head *bf_q,
56 struct ath_tx_status *ts, int txok, int sendbar); 56 struct ath_tx_status *ts, int txok, int sendbar);
57static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 57static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58 struct list_head *head); 58 struct list_head *head);
59static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf); 59static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
60static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
61 struct ath_tx_status *ts, int txok);
62static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, 60static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
63 int nbad, int txok, bool update_rc); 61 int nframes, int nbad, int txok, bool update_rc);
64static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 62static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
65 int seqno); 63 int seqno);
66 64
@@ -124,7 +122,7 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
124 122
125static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 123static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
126{ 124{
127 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 125 struct ath_txq *txq = tid->ac->txq;
128 126
129 WARN_ON(!tid->paused); 127 WARN_ON(!tid->paused);
130 128
@@ -140,12 +138,21 @@ unlock:
140 spin_unlock_bh(&txq->axq_lock); 138 spin_unlock_bh(&txq->axq_lock);
141} 139}
142 140
141static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
142{
143 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
144 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
145 sizeof(tx_info->rate_driver_data));
146 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
147}
148
143static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 149static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
144{ 150{
145 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 151 struct ath_txq *txq = tid->ac->txq;
146 struct ath_buf *bf; 152 struct ath_buf *bf;
147 struct list_head bf_head; 153 struct list_head bf_head;
148 struct ath_tx_status ts; 154 struct ath_tx_status ts;
155 struct ath_frame_info *fi;
149 156
150 INIT_LIST_HEAD(&bf_head); 157 INIT_LIST_HEAD(&bf_head);
151 158
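
The hunk above introduces get_frame_info(), which overlays the driver's per-frame state on the rate_driver_data scratch area of the mac80211 tx_info and guards the overlay with BUILD_BUG_ON(). Below is a userspace sketch of the same pattern; struct frame_cb and struct frame_info are stand-ins for ieee80211_tx_info and ath_frame_info, and C11 _Static_assert stands in for BUILD_BUG_ON().

#include <stdio.h>
#include <string.h>

/* Framework-owned control block with a small scratch area drivers may reuse. */
struct frame_cb {
        unsigned int flags;
        void *driver_data[4];   /* four pointers of scratch space */
};

/* Driver-private per-frame state, overlaid on the scratch area. */
struct frame_info {
        unsigned short framelen;
        unsigned short seqno;
        unsigned char retries;
        unsigned char keyix;
};

static struct frame_info *get_frame_info(struct frame_cb *cb)
{
        /* Compile-time guard: refuse to build if the private struct no longer
         * fits in the scratch area. */
        _Static_assert(sizeof(struct frame_info) <=
                       sizeof(((struct frame_cb *)0)->driver_data),
                       "frame_info does not fit in driver_data");
        return (struct frame_info *)&cb->driver_data[0];
}

int main(void)
{
        struct frame_cb cb;
        struct frame_info *fi;

        memset(&cb, 0, sizeof(cb));
        fi = get_frame_info(&cb);
        fi->seqno = 42;
        fi->retries = 1;
        printf("seqno %u, retries %u\n", (unsigned)fi->seqno, (unsigned)fi->retries);
        return 0;
}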
@@ -156,12 +163,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
156 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 163 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
157 list_move_tail(&bf->list, &bf_head); 164 list_move_tail(&bf->list, &bf_head);
158 165
159 if (bf_isretried(bf)) { 166 spin_unlock_bh(&txq->axq_lock);
160 ath_tx_update_baw(sc, tid, bf->bf_seqno); 167 fi = get_frame_info(bf->bf_mpdu);
168 if (fi->retries) {
169 ath_tx_update_baw(sc, tid, fi->seqno);
161 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 170 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
162 } else { 171 } else {
163 ath_tx_send_ht_normal(sc, txq, tid, &bf_head); 172 ath_tx_send_normal(sc, txq, tid, &bf_head);
164 } 173 }
174 spin_lock_bh(&txq->axq_lock);
165 } 175 }
166 176
167 spin_unlock_bh(&txq->axq_lock); 177 spin_unlock_bh(&txq->axq_lock);
@@ -184,14 +194,11 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
184} 194}
185 195
186static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 196static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
187 struct ath_buf *bf) 197 u16 seqno)
188{ 198{
189 int index, cindex; 199 int index, cindex;
190 200
191 if (bf_isretried(bf)) 201 index = ATH_BA_INDEX(tid->seq_start, seqno);
192 return;
193
194 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
195 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 202 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
196 __set_bit(cindex, tid->tx_buf); 203 __set_bit(cindex, tid->tx_buf);
197 204
@@ -215,6 +222,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
215 struct ath_buf *bf; 222 struct ath_buf *bf;
216 struct list_head bf_head; 223 struct list_head bf_head;
217 struct ath_tx_status ts; 224 struct ath_tx_status ts;
225 struct ath_frame_info *fi;
218 226
219 memset(&ts, 0, sizeof(ts)); 227 memset(&ts, 0, sizeof(ts));
220 INIT_LIST_HEAD(&bf_head); 228 INIT_LIST_HEAD(&bf_head);
@@ -226,8 +234,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
226 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 234 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
227 list_move_tail(&bf->list, &bf_head); 235 list_move_tail(&bf->list, &bf_head);
228 236
229 if (bf_isretried(bf)) 237 fi = get_frame_info(bf->bf_mpdu);
230 ath_tx_update_baw(sc, tid, bf->bf_seqno); 238 if (fi->retries)
239 ath_tx_update_baw(sc, tid, fi->seqno);
231 240
232 spin_unlock(&txq->axq_lock); 241 spin_unlock(&txq->axq_lock);
233 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 242 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
@@ -239,16 +248,15 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
239} 248}
240 249
241static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, 250static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
242 struct ath_buf *bf) 251 struct sk_buff *skb)
243{ 252{
244 struct sk_buff *skb; 253 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
245 struct ieee80211_hdr *hdr; 254 struct ieee80211_hdr *hdr;
246 255
247 bf->bf_state.bf_type |= BUF_RETRY;
248 bf->bf_retries++;
249 TX_STAT_INC(txq->axq_qnum, a_retries); 256 TX_STAT_INC(txq->axq_qnum, a_retries);
257 if (tx_info->control.rates[4].count++ > 0)
258 return;
250 259
251 skb = bf->bf_mpdu;
252 hdr = (struct ieee80211_hdr *)skb->data; 260 hdr = (struct ieee80211_hdr *)skb->data;
253 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); 261 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
254} 262}
@@ -298,9 +306,41 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
298 return tbf; 306 return tbf;
299} 307}
300 308
309static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
310 struct ath_tx_status *ts, int txok,
311 int *nframes, int *nbad)
312{
313 struct ath_frame_info *fi;
314 u16 seq_st = 0;
315 u32 ba[WME_BA_BMP_SIZE >> 5];
316 int ba_index;
317 int isaggr = 0;
318
319 *nbad = 0;
320 *nframes = 0;
321
322 isaggr = bf_isaggr(bf);
323 if (isaggr) {
324 seq_st = ts->ts_seqnum;
325 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
326 }
327
328 while (bf) {
329 fi = get_frame_info(bf->bf_mpdu);
330 ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
331
332 (*nframes)++;
333 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
334 (*nbad)++;
335
336 bf = bf->bf_next;
337 }
338}
339
340
301static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, 341static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
302 struct ath_buf *bf, struct list_head *bf_q, 342 struct ath_buf *bf, struct list_head *bf_q,
303 struct ath_tx_status *ts, int txok) 343 struct ath_tx_status *ts, int txok, bool retry)
304{ 344{
305 struct ath_node *an = NULL; 345 struct ath_node *an = NULL;
306 struct sk_buff *skb; 346 struct sk_buff *skb;
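
ath_tx_count_frames() above walks an aggregate and counts the subframes that are missing from the hardware-reported block-ack bitmap. The helpers below approximate what ATH_BA_INDEX() and ATH_BA_ISSET() are conventionally defined as (offset modulo the 12-bit sequence space, then a bit test bounded by the bitmap width); the exact kernel macro bodies are not quoted here, so treat this as an illustration of the accounting, not the driver's code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEQ_MAX     4096  /* 12-bit 802.11 sequence space */
#define BA_BMP_SIZE 64    /* block-ack bitmap covers 64 subframes */

/* Distance from the BA starting sequence number, modulo the sequence space. */
static unsigned int ba_index(uint16_t seq_start, uint16_t seqno)
{
        return (seqno - seq_start) & (SEQ_MAX - 1);
}

/* Is the subframe at this offset acknowledged in the bitmap? */
static bool ba_isset(const uint32_t *bm, unsigned int idx)
{
        if (idx >= BA_BMP_SIZE)
                return false;
        return bm[idx >> 5] & (1u << (idx & 31));
}

int main(void)
{
        /* Pretend hardware reported a BA starting at seq 100 with the first,
         * second and fourth subframes acked (bits 0, 1 and 3). */
        uint32_t ba[BA_BMP_SIZE / 32] = { 0x0000000b, 0 };
        uint16_t seq_start = 100;
        uint16_t seqnos[] = { 100, 101, 102, 103 };
        int nframes = 0, nbad = 0;
        size_t i;

        for (i = 0; i < sizeof(seqnos) / sizeof(seqnos[0]); i++) {
                nframes++;
                if (!ba_isset(ba, ba_index(seq_start, seqnos[i])))
                        nbad++;  /* not acked: candidate for software retry */
        }
        printf("%d of %d subframes need retransmission\n", nbad, nframes);
        return 0;
}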
@@ -316,7 +356,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
316 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 356 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
317 bool rc_update = true; 357 bool rc_update = true;
318 struct ieee80211_tx_rate rates[4]; 358 struct ieee80211_tx_rate rates[4];
359 struct ath_frame_info *fi;
319 int nframes; 360 int nframes;
361 u8 tidno;
320 362
321 skb = bf->bf_mpdu; 363 skb = bf->bf_mpdu;
322 hdr = (struct ieee80211_hdr *)skb->data; 364 hdr = (struct ieee80211_hdr *)skb->data;
@@ -325,7 +367,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
325 hw = bf->aphy->hw; 367 hw = bf->aphy->hw;
326 368
327 memcpy(rates, tx_info->control.rates, sizeof(rates)); 369 memcpy(rates, tx_info->control.rates, sizeof(rates));
328 nframes = bf->bf_nframes;
329 370
330 rcu_read_lock(); 371 rcu_read_lock();
331 372
@@ -342,7 +383,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
342 !bf->bf_stale || bf_next != NULL) 383 !bf->bf_stale || bf_next != NULL)
343 list_move_tail(&bf->list, &bf_head); 384 list_move_tail(&bf->list, &bf_head);
344 385
345 ath_tx_rc_status(bf, ts, 1, 0, false); 386 ath_tx_rc_status(bf, ts, 1, 1, 0, false);
346 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 387 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
347 0, 0); 388 0, 0);
348 389
@@ -352,14 +393,15 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
352 } 393 }
353 394
354 an = (struct ath_node *)sta->drv_priv; 395 an = (struct ath_node *)sta->drv_priv;
355 tid = ATH_AN_2_TID(an, bf->bf_tidno); 396 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
397 tid = ATH_AN_2_TID(an, tidno);
356 398
357 /* 399 /*
358 * The hardware occasionally sends a tx status for the wrong TID. 400 * The hardware occasionally sends a tx status for the wrong TID.
359 * In this case, the BA status cannot be considered valid and all 401 * In this case, the BA status cannot be considered valid and all
360 * subframes need to be retransmitted 402 * subframes need to be retransmitted
361 */ 403 */
362 if (bf->bf_tidno != ts->tid) 404 if (tidno != ts->tid)
363 txok = false; 405 txok = false;
364 406
365 isaggr = bf_isaggr(bf); 407 isaggr = bf_isaggr(bf);
@@ -385,15 +427,16 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
385 INIT_LIST_HEAD(&bf_pending); 427 INIT_LIST_HEAD(&bf_pending);
386 INIT_LIST_HEAD(&bf_head); 428 INIT_LIST_HEAD(&bf_head);
387 429
388 nbad = ath_tx_num_badfrms(sc, bf, ts, txok); 430 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
389 while (bf) { 431 while (bf) {
390 txfail = txpending = 0; 432 txfail = txpending = 0;
391 bf_next = bf->bf_next; 433 bf_next = bf->bf_next;
392 434
393 skb = bf->bf_mpdu; 435 skb = bf->bf_mpdu;
394 tx_info = IEEE80211_SKB_CB(skb); 436 tx_info = IEEE80211_SKB_CB(skb);
437 fi = get_frame_info(skb);
395 438
396 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { 439 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
397 /* transmit completion, subframe is 440 /* transmit completion, subframe is
398 * acked by block ack */ 441 * acked by block ack */
399 acked_cnt++; 442 acked_cnt++;
@@ -401,10 +444,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
401 /* transmit completion */ 444 /* transmit completion */
402 acked_cnt++; 445 acked_cnt++;
403 } else { 446 } else {
404 if (!(tid->state & AGGR_CLEANUP) && 447 if (!(tid->state & AGGR_CLEANUP) && retry) {
405 !bf_last->bf_tx_aborted) { 448 if (fi->retries < ATH_MAX_SW_RETRIES) {
406 if (bf->bf_retries < ATH_MAX_SW_RETRIES) { 449 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
407 ath_tx_set_retry(sc, txq, bf);
408 txpending = 1; 450 txpending = 1;
409 } else { 451 } else {
410 bf->bf_state.bf_type |= BUF_XRETRY; 452 bf->bf_state.bf_type |= BUF_XRETRY;
@@ -442,16 +484,15 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
442 * block-ack window 484 * block-ack window
443 */ 485 */
444 spin_lock_bh(&txq->axq_lock); 486 spin_lock_bh(&txq->axq_lock);
445 ath_tx_update_baw(sc, tid, bf->bf_seqno); 487 ath_tx_update_baw(sc, tid, fi->seqno);
446 spin_unlock_bh(&txq->axq_lock); 488 spin_unlock_bh(&txq->axq_lock);
447 489
448 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 490 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
449 memcpy(tx_info->control.rates, rates, sizeof(rates)); 491 memcpy(tx_info->control.rates, rates, sizeof(rates));
450 bf->bf_nframes = nframes; 492 ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
451 ath_tx_rc_status(bf, ts, nbad, txok, true);
452 rc_update = false; 493 rc_update = false;
453 } else { 494 } else {
454 ath_tx_rc_status(bf, ts, nbad, txok, false); 495 ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
455 } 496 }
456 497
457 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 498 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
@@ -470,14 +511,13 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
470 */ 511 */
471 if (!tbf) { 512 if (!tbf) {
472 spin_lock_bh(&txq->axq_lock); 513 spin_lock_bh(&txq->axq_lock);
473 ath_tx_update_baw(sc, tid, 514 ath_tx_update_baw(sc, tid, fi->seqno);
474 bf->bf_seqno);
475 spin_unlock_bh(&txq->axq_lock); 515 spin_unlock_bh(&txq->axq_lock);
476 516
477 bf->bf_state.bf_type |= 517 bf->bf_state.bf_type |=
478 BUF_XRETRY; 518 BUF_XRETRY;
479 ath_tx_rc_status(bf, ts, nbad, 519 ath_tx_rc_status(bf, ts, nframes,
480 0, false); 520 nbad, 0, false);
481 ath_tx_complete_buf(sc, bf, txq, 521 ath_tx_complete_buf(sc, bf, txq,
482 &bf_head, 522 &bf_head,
483 ts, 0, 0); 523 ts, 0, 0);
@@ -611,6 +651,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
611 u16 minlen; 651 u16 minlen;
612 u8 flags, rix; 652 u8 flags, rix;
613 int width, streams, half_gi, ndelim, mindelim; 653 int width, streams, half_gi, ndelim, mindelim;
654 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
614 655
615 /* Select standard number of delimiters based on frame length alone */ 656 /* Select standard number of delimiters based on frame length alone */
616 ndelim = ATH_AGGR_GET_NDELIM(frmlen); 657 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
@@ -621,7 +662,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
621 * TODO - this could be improved to be dependent on the rate. 662 * TODO - this could be improved to be dependent on the rate.
622 * The hardware can keep up at lower rates, but not higher rates 663 * The hardware can keep up at lower rates, but not higher rates
623 */ 664 */
624 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) 665 if (fi->keyix != ATH9K_TXKEYIX_INVALID)
625 ndelim += ATH_AGGR_ENCRYPTDELIM; 666 ndelim += ATH_AGGR_ENCRYPTDELIM;
626 667
627 /* 668 /*
@@ -665,7 +706,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
665static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, 706static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
666 struct ath_txq *txq, 707 struct ath_txq *txq,
667 struct ath_atx_tid *tid, 708 struct ath_atx_tid *tid,
668 struct list_head *bf_q) 709 struct list_head *bf_q,
710 int *aggr_len)
669{ 711{
670#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) 712#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
671 struct ath_buf *bf, *bf_first, *bf_prev = NULL; 713 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
@@ -674,14 +716,16 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
674 al_delta, h_baw = tid->baw_size / 2; 716 al_delta, h_baw = tid->baw_size / 2;
675 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; 717 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
676 struct ieee80211_tx_info *tx_info; 718 struct ieee80211_tx_info *tx_info;
719 struct ath_frame_info *fi;
677 720
678 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); 721 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
679 722
680 do { 723 do {
681 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 724 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
725 fi = get_frame_info(bf->bf_mpdu);
682 726
683 /* do not step over block-ack window */ 727 /* do not step over block-ack window */
684 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) { 728 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
685 status = ATH_AGGR_BAW_CLOSED; 729 status = ATH_AGGR_BAW_CLOSED;
686 break; 730 break;
687 } 731 }
@@ -692,7 +736,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
692 } 736 }
693 737
694 /* do not exceed aggregation limit */ 738 /* do not exceed aggregation limit */
695 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen; 739 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
696 740
697 if (nframes && 741 if (nframes &&
698 (aggr_limit < (al + bpad + al_delta + prev_al))) { 742 (aggr_limit < (al + bpad + al_delta + prev_al))) {
@@ -719,14 +763,15 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
719 * Get the delimiters needed to meet the MPDU 763 * Get the delimiters needed to meet the MPDU
720 * density for this node. 764 * density for this node.
721 */ 765 */
722 ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen); 766 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
723 bpad = PADBYTES(al_delta) + (ndelim << 2); 767 bpad = PADBYTES(al_delta) + (ndelim << 2);
724 768
725 bf->bf_next = NULL; 769 bf->bf_next = NULL;
726 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0); 770 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
727 771
728 /* link buffers of this frame to the aggregate */ 772 /* link buffers of this frame to the aggregate */
729 ath_tx_addto_baw(sc, tid, bf); 773 if (!fi->retries)
774 ath_tx_addto_baw(sc, tid, fi->seqno);
730 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim); 775 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
731 list_move_tail(&bf->list, bf_q); 776 list_move_tail(&bf->list, bf_q);
732 if (bf_prev) { 777 if (bf_prev) {
@@ -738,8 +783,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
738 783
739 } while (!list_empty(&tid->buf_q)); 784 } while (!list_empty(&tid->buf_q));
740 785
741 bf_first->bf_al = al; 786 *aggr_len = al;
742 bf_first->bf_nframes = nframes;
743 787
744 return status; 788 return status;
745#undef PADBYTES 789#undef PADBYTES
@@ -750,7 +794,9 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
750{ 794{
751 struct ath_buf *bf; 795 struct ath_buf *bf;
752 enum ATH_AGGR_STATUS status; 796 enum ATH_AGGR_STATUS status;
797 struct ath_frame_info *fi;
753 struct list_head bf_q; 798 struct list_head bf_q;
799 int aggr_len;
754 800
755 do { 801 do {
756 if (list_empty(&tid->buf_q)) 802 if (list_empty(&tid->buf_q))
@@ -758,7 +804,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
758 804
759 INIT_LIST_HEAD(&bf_q); 805 INIT_LIST_HEAD(&bf_q);
760 806
761 status = ath_tx_form_aggr(sc, txq, tid, &bf_q); 807 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
762 808
763 /* 809 /*
764 * no frames picked up to be aggregated; 810 * no frames picked up to be aggregated;
@@ -771,18 +817,20 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
771 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); 817 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
772 818
773 /* if only one frame, send as non-aggregate */ 819 /* if only one frame, send as non-aggregate */
774 if (bf->bf_nframes == 1) { 820 if (bf == bf->bf_lastbf) {
821 fi = get_frame_info(bf->bf_mpdu);
822
775 bf->bf_state.bf_type &= ~BUF_AGGR; 823 bf->bf_state.bf_type &= ~BUF_AGGR;
776 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc); 824 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
777 ath_buf_set_rate(sc, bf); 825 ath_buf_set_rate(sc, bf, fi->framelen);
778 ath_tx_txqaddbuf(sc, txq, &bf_q); 826 ath_tx_txqaddbuf(sc, txq, &bf_q);
779 continue; 827 continue;
780 } 828 }
781 829
782 /* setup first desc of aggregate */ 830 /* setup first desc of aggregate */
783 bf->bf_state.bf_type |= BUF_AGGR; 831 bf->bf_state.bf_type |= BUF_AGGR;
784 ath_buf_set_rate(sc, bf); 832 ath_buf_set_rate(sc, bf, aggr_len);
785 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); 833 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
786 834
787 /* anchor last desc of aggregate */ 835 /* anchor last desc of aggregate */
788 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc); 836 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
@@ -817,7 +865,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
817{ 865{
818 struct ath_node *an = (struct ath_node *)sta->drv_priv; 866 struct ath_node *an = (struct ath_node *)sta->drv_priv;
819 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 867 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
820 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; 868 struct ath_txq *txq = txtid->ac->txq;
821 869
822 if (txtid->state & AGGR_CLEANUP) 870 if (txtid->state & AGGR_CLEANUP)
823 return; 871 return;
@@ -888,10 +936,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
888 struct ath_hw *ah = sc->sc_ah; 936 struct ath_hw *ah = sc->sc_ah;
889 struct ath_common *common = ath9k_hw_common(ah); 937 struct ath_common *common = ath9k_hw_common(ah);
890 struct ath9k_tx_queue_info qi; 938 struct ath9k_tx_queue_info qi;
939 static const int subtype_txq_to_hwq[] = {
940 [WME_AC_BE] = ATH_TXQ_AC_BE,
941 [WME_AC_BK] = ATH_TXQ_AC_BK,
942 [WME_AC_VI] = ATH_TXQ_AC_VI,
943 [WME_AC_VO] = ATH_TXQ_AC_VO,
944 };
891 int qnum, i; 945 int qnum, i;
892 946
893 memset(&qi, 0, sizeof(qi)); 947 memset(&qi, 0, sizeof(qi));
894 qi.tqi_subtype = subtype; 948 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
895 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; 949 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
896 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; 950 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
897 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; 951 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
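
The ath_txq_setup() hunk above maps mac80211 access categories onto hardware queue subtypes through a static const table built with designated initializers, so each entry is tied to its enum constant rather than to its position. A small sketch with illustrative enum values (the real WME_AC_*/ATH_TXQ_AC_* numbering may differ):

#include <stdio.h>

/* mac80211-style access-category order (illustrative values). */
enum wme_ac { WME_AC_VO, WME_AC_VI, WME_AC_BE, WME_AC_BK, WME_NUM_AC };

/* Hardware queue subtype order (illustrative values). */
enum hw_ac { HW_AC_BK = 0, HW_AC_BE, HW_AC_VI, HW_AC_VO };

/* Designated initializers keep the mapping correct even if either enum is
 * reordered or extended. */
static const int ac_to_hwq[WME_NUM_AC] = {
        [WME_AC_BE] = HW_AC_BE,
        [WME_AC_BK] = HW_AC_BK,
        [WME_AC_VI] = HW_AC_VI,
        [WME_AC_VO] = HW_AC_VO,
};

int main(void)
{
        printf("voice AC maps to hw queue subtype %d\n", ac_to_hwq[WME_AC_VO]);
        return 0;
}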
@@ -940,7 +994,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
940 if (!ATH_TXQ_SETUP(sc, qnum)) { 994 if (!ATH_TXQ_SETUP(sc, qnum)) {
941 struct ath_txq *txq = &sc->tx.txq[qnum]; 995 struct ath_txq *txq = &sc->tx.txq[qnum];
942 996
943 txq->axq_class = subtype;
944 txq->axq_qnum = qnum; 997 txq->axq_qnum = qnum;
945 txq->axq_link = NULL; 998 txq->axq_link = NULL;
946 INIT_LIST_HEAD(&txq->axq_q); 999 INIT_LIST_HEAD(&txq->axq_q);
@@ -1062,8 +1115,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1062 } 1115 }
1063 1116
1064 lastbf = bf->bf_lastbf; 1117 lastbf = bf->bf_lastbf;
1065 if (!retry_tx)
1066 lastbf->bf_tx_aborted = true;
1067 1118
1068 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1119 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1069 list_cut_position(&bf_head, 1120 list_cut_position(&bf_head,
@@ -1080,7 +1131,8 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1080 spin_unlock_bh(&txq->axq_lock); 1131 spin_unlock_bh(&txq->axq_lock);
1081 1132
1082 if (bf_isampdu(bf)) 1133 if (bf_isampdu(bf))
1083 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0); 1134 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1135 retry_tx);
1084 else 1136 else
1085 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 1137 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
1086 } 1138 }
@@ -1101,7 +1153,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1101 1153
1102 if (bf_isampdu(bf)) 1154 if (bf_isampdu(bf))
1103 ath_tx_complete_aggr(sc, txq, bf, &bf_head, 1155 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
1104 &ts, 0); 1156 &ts, 0, retry_tx);
1105 else 1157 else
1106 ath_tx_complete_buf(sc, bf, txq, &bf_head, 1158 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1107 &ts, 0, 0); 1159 &ts, 0, 0);
@@ -1148,13 +1200,11 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1148 ath_print(common, ATH_DBG_FATAL, 1200 ath_print(common, ATH_DBG_FATAL,
1149 "Failed to stop TX DMA. Resetting hardware!\n"); 1201 "Failed to stop TX DMA. Resetting hardware!\n");
1150 1202
1151 spin_lock_bh(&sc->sc_resetlock);
1152 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false); 1203 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
1153 if (r) 1204 if (r)
1154 ath_print(common, ATH_DBG_FATAL, 1205 ath_print(common, ATH_DBG_FATAL,
1155 "Unable to reset hardware; reset status %d\n", 1206 "Unable to reset hardware; reset status %d\n",
1156 r); 1207 r);
1157 spin_unlock_bh(&sc->sc_resetlock);
1158 } 1208 }
1159 1209
1160 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1210 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
@@ -1212,24 +1262,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1212 } 1262 }
1213} 1263}
1214 1264
1215int ath_tx_setup(struct ath_softc *sc, int haltype)
1216{
1217 struct ath_txq *txq;
1218
1219 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
1220 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1221 "HAL AC %u out of range, max %zu!\n",
1222 haltype, ARRAY_SIZE(sc->tx.hwq_map));
1223 return 0;
1224 }
1225 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1226 if (txq != NULL) {
1227 sc->tx.hwq_map[haltype] = txq->axq_qnum;
1228 return 1;
1229 } else
1230 return 0;
1231}
1232
1233/***********/ 1265/***********/
1234/* TX, DMA */ 1266/* TX, DMA */
1235/***********/ 1267/***********/
@@ -1299,12 +1331,11 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1299} 1331}
1300 1332
1301static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, 1333static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1302 struct list_head *bf_head, 1334 struct ath_buf *bf, struct ath_tx_control *txctl)
1303 struct ath_tx_control *txctl)
1304{ 1335{
1305 struct ath_buf *bf; 1336 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
1337 struct list_head bf_head;
1306 1338
1307 bf = list_first_entry(bf_head, struct ath_buf, list);
1308 bf->bf_state.bf_type |= BUF_AMPDU; 1339 bf->bf_state.bf_type |= BUF_AMPDU;
1309 TX_STAT_INC(txctl->txq->axq_qnum, a_queued); 1340 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
1310 1341
@@ -1316,56 +1347,47 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1316 * - h/w queue depth exceeds low water mark 1347 * - h/w queue depth exceeds low water mark
1317 */ 1348 */
1318 if (!list_empty(&tid->buf_q) || tid->paused || 1349 if (!list_empty(&tid->buf_q) || tid->paused ||
1319 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) || 1350 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
1320 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) { 1351 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
1321 /* 1352 /*
1322 * Add this frame to software queue for scheduling later 1353 * Add this frame to software queue for scheduling later
1323 * for aggregation. 1354 * for aggregation.
1324 */ 1355 */
1325 list_move_tail(&bf->list, &tid->buf_q); 1356 list_add_tail(&bf->list, &tid->buf_q);
1326 ath_tx_queue_tid(txctl->txq, tid); 1357 ath_tx_queue_tid(txctl->txq, tid);
1327 return; 1358 return;
1328 } 1359 }
1329 1360
1361 INIT_LIST_HEAD(&bf_head);
1362 list_add(&bf->list, &bf_head);
1363
1330 /* Add sub-frame to BAW */ 1364 /* Add sub-frame to BAW */
1331 ath_tx_addto_baw(sc, tid, bf); 1365 if (!fi->retries)
1366 ath_tx_addto_baw(sc, tid, fi->seqno);
1332 1367
1333 /* Queue to h/w without aggregation */ 1368 /* Queue to h/w without aggregation */
1334 bf->bf_nframes = 1;
1335 bf->bf_lastbf = bf; 1369 bf->bf_lastbf = bf;
1336 ath_buf_set_rate(sc, bf); 1370 ath_buf_set_rate(sc, bf, fi->framelen);
1337 ath_tx_txqaddbuf(sc, txctl->txq, bf_head); 1371 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
1338} 1372}
1339 1373
1340static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq, 1374static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1341 struct ath_atx_tid *tid, 1375 struct ath_atx_tid *tid,
1342 struct list_head *bf_head) 1376 struct list_head *bf_head)
1343{ 1377{
1378 struct ath_frame_info *fi;
1344 struct ath_buf *bf; 1379 struct ath_buf *bf;
1345 1380
1346 bf = list_first_entry(bf_head, struct ath_buf, list); 1381 bf = list_first_entry(bf_head, struct ath_buf, list);
1347 bf->bf_state.bf_type &= ~BUF_AMPDU; 1382 bf->bf_state.bf_type &= ~BUF_AMPDU;
1348 1383
1349 /* update starting sequence number for subsequent ADDBA request */ 1384 /* update starting sequence number for subsequent ADDBA request */
1350 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 1385 if (tid)
1351 1386 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1352 bf->bf_nframes = 1;
1353 bf->bf_lastbf = bf;
1354 ath_buf_set_rate(sc, bf);
1355 ath_tx_txqaddbuf(sc, txq, bf_head);
1356 TX_STAT_INC(txq->axq_qnum, queued);
1357}
1358
1359static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1360 struct list_head *bf_head)
1361{
1362 struct ath_buf *bf;
1363
1364 bf = list_first_entry(bf_head, struct ath_buf, list);
1365 1387
1366 bf->bf_lastbf = bf; 1388 bf->bf_lastbf = bf;
1367 bf->bf_nframes = 1; 1389 fi = get_frame_info(bf->bf_mpdu);
1368 ath_buf_set_rate(sc, bf); 1390 ath_buf_set_rate(sc, bf, fi->framelen);
1369 ath_tx_txqaddbuf(sc, txq, bf_head); 1391 ath_tx_txqaddbuf(sc, txq, bf_head);
1370 TX_STAT_INC(txq->axq_qnum, queued); 1392 TX_STAT_INC(txq->axq_qnum, queued);
1371} 1393}
@@ -1393,40 +1415,52 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1393 return htype; 1415 return htype;
1394} 1416}
1395 1417
1396static void assign_aggr_tid_seqno(struct sk_buff *skb, 1418static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1397 struct ath_buf *bf) 1419 int framelen)
1398{ 1420{
1421 struct ath_wiphy *aphy = hw->priv;
1422 struct ath_softc *sc = aphy->sc;
1399 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1423 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1424 struct ieee80211_sta *sta = tx_info->control.sta;
1425 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1400 struct ieee80211_hdr *hdr; 1426 struct ieee80211_hdr *hdr;
1427 struct ath_frame_info *fi = get_frame_info(skb);
1401 struct ath_node *an; 1428 struct ath_node *an;
1402 struct ath_atx_tid *tid; 1429 struct ath_atx_tid *tid;
1403 __le16 fc; 1430 enum ath9k_key_type keytype;
1404 u8 *qc; 1431 u16 seqno = 0;
1432 u8 tidno;
1405 1433
1406 if (!tx_info->control.sta) 1434 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
1407 return;
1408 1435
1409 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1410 hdr = (struct ieee80211_hdr *)skb->data; 1436 hdr = (struct ieee80211_hdr *)skb->data;
1411 fc = hdr->frame_control; 1437 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1438 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
1412 1439
1413 if (ieee80211_is_data_qos(fc)) { 1440 an = (struct ath_node *) sta->drv_priv;
1414 qc = ieee80211_get_qos_ctl(hdr); 1441 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1415 bf->bf_tidno = qc[0] & 0xf; 1442
1443 /*
1444 * Override seqno set by upper layer with the one
1445 * in tx aggregation state.
1446 */
1447 tid = ATH_AN_2_TID(an, tidno);
1448 seqno = tid->seq_next;
1449 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1450 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1416 } 1451 }
1417 1452
1418 /* 1453 memset(fi, 0, sizeof(*fi));
1419 * For HT capable stations, we save tidno for later use. 1454 if (hw_key)
1420 * We also override seqno set by upper layer with the one 1455 fi->keyix = hw_key->hw_key_idx;
1421 * in tx aggregation state. 1456 else
1422 */ 1457 fi->keyix = ATH9K_TXKEYIX_INVALID;
1423 tid = ATH_AN_2_TID(an, bf->bf_tidno); 1458 fi->keytype = keytype;
1424 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1459 fi->framelen = framelen;
1425 bf->bf_seqno = tid->seq_next; 1460 fi->seqno = seqno;
1426 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1427} 1461}
1428 1462
1429static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc) 1463static int setup_tx_flags(struct sk_buff *skb)
1430{ 1464{
1431 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1465 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1432 int flags = 0; 1466 int flags = 0;
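
setup_frame_info() above overrides the sequence number supplied by mac80211 with the per-TID counter kept in the aggregation state, writing it into seq_ctrl shifted by IEEE80211_SEQ_SEQ_SHIFT and advancing the counter modulo IEEE80211_SEQ_MAX. Below is a sketch of that bookkeeping with the standard 802.11 constants (4-bit fragment field, 12-bit sequence space); INCR() is approximated with a modulo, and endianness handling is omitted.

#include <stdint.h>
#include <stdio.h>

#define SEQ_SEQ_SHIFT 4     /* low 4 bits of seq_ctrl hold the fragment number */
#define SEQ_MAX       4096  /* sequence numbers are 12 bits wide */

struct tid_state {
        uint16_t seq_next;  /* next sequence number to hand out for this TID */
};

/* Assign the driver-maintained per-TID sequence number to a frame. */
static uint16_t assign_seqno(struct tid_state *tid)
{
        uint16_t seqno = tid->seq_next;

        tid->seq_next = (tid->seq_next + 1) % SEQ_MAX;  /* wrap at 12 bits, cf. INCR() */
        return seqno;
}

int main(void)
{
        struct tid_state tid = { .seq_next = 4094 };
        int i;

        for (i = 0; i < 4; i++) {
                uint16_t seqno = assign_seqno(&tid);
                uint16_t seq_ctrl = seqno << SEQ_SEQ_SHIFT;  /* host order; the driver stores LE */
                printf("seqno %u -> seq_ctrl 0x%04x\n", (unsigned)seqno, (unsigned)seq_ctrl);
        }
        return 0;
}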
@@ -1437,7 +1471,7 @@ static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
1437 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1471 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1438 flags |= ATH9K_TXDESC_NOACK; 1472 flags |= ATH9K_TXDESC_NOACK;
1439 1473
1440 if (use_ldpc) 1474 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1441 flags |= ATH9K_TXDESC_LDPC; 1475 flags |= ATH9K_TXDESC_LDPC;
1442 1476
1443 return flags; 1477 return flags;
@@ -1449,13 +1483,11 @@ static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
1449 * width - 0 for 20 MHz, 1 for 40 MHz 1483 * width - 0 for 20 MHz, 1 for 40 MHz
1450 * half_gi - to use 4us v/s 3.6 us for symbol time 1484 * half_gi - to use 4us v/s 3.6 us for symbol time
1451 */ 1485 */
1452static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf, 1486static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
1453 int width, int half_gi, bool shortPreamble) 1487 int width, int half_gi, bool shortPreamble)
1454{ 1488{
1455 u32 nbits, nsymbits, duration, nsymbols; 1489 u32 nbits, nsymbits, duration, nsymbols;
1456 int streams, pktlen; 1490 int streams;
1457
1458 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1459 1491
1460 /* find number of symbols: PLCP + data */ 1492 /* find number of symbols: PLCP + data */
1461 streams = HT_RC_2_STREAMS(rix); 1493 streams = HT_RC_2_STREAMS(rix);
@@ -1474,7 +1506,7 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1474 return duration; 1506 return duration;
1475} 1507}
1476 1508
1477static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) 1509static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
1478{ 1510{
1479 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1511 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1480 struct ath9k_11n_rate_series series[4]; 1512 struct ath9k_11n_rate_series series[4];
@@ -1537,7 +1569,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1537 if (rates[i].flags & IEEE80211_TX_RC_MCS) { 1569 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1538 /* MCS rates */ 1570 /* MCS rates */
1539 series[i].Rate = rix | 0x80; 1571 series[i].Rate = rix | 0x80;
1540 series[i].PktDuration = ath_pkt_duration(sc, rix, bf, 1572 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
1541 is_40, is_sgi, is_sp); 1573 is_40, is_sgi, is_sp);
1542 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) 1574 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1543 series[i].RateFlags |= ATH9K_RATESERIES_STBC; 1575 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
@@ -1561,11 +1593,11 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1561 } 1593 }
1562 1594
1563 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, 1595 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1564 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp); 1596 phy, rate->bitrate * 100, len, rix, is_sp);
1565 } 1597 }
1566 1598
1567 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ 1599 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1568 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit)) 1600 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
1569 flags &= ~ATH9K_TXDESC_RTSENA; 1601 flags &= ~ATH9K_TXDESC_RTSENA;
1570 1602
1571 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */ 1603 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
@@ -1582,67 +1614,29 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1582 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192); 1614 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
1583} 1615}
1584 1616
1585static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, 1617static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
1586 struct sk_buff *skb, 1618 struct ath_txq *txq,
1587 struct ath_tx_control *txctl) 1619 struct sk_buff *skb)
1588{ 1620{
1589 struct ath_wiphy *aphy = hw->priv; 1621 struct ath_wiphy *aphy = hw->priv;
1590 struct ath_softc *sc = aphy->sc; 1622 struct ath_softc *sc = aphy->sc;
1591 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1623 struct ath_hw *ah = sc->sc_ah;
1592 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1624 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1593 int hdrlen; 1625 struct ath_frame_info *fi = get_frame_info(skb);
1594 __le16 fc; 1626 struct ath_buf *bf;
1595 int padpos, padsize; 1627 struct ath_desc *ds;
1596 bool use_ldpc = false; 1628 int frm_type;
1597 1629
1598 tx_info->pad[0] = 0; 1630 bf = ath_tx_get_buffer(sc);
1599 switch (txctl->frame_type) { 1631 if (!bf) {
1600 case ATH9K_IFT_NOT_INTERNAL: 1632 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
1601 break; 1633 return NULL;
1602 case ATH9K_IFT_PAUSE:
1603 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1604 /* fall through */
1605 case ATH9K_IFT_UNPAUSE:
1606 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1607 break;
1608 } 1634 }
1609 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1610 fc = hdr->frame_control;
1611 1635
1612 ATH_TXBUF_RESET(bf); 1636 ATH_TXBUF_RESET(bf);
1613 1637
1614 bf->aphy = aphy; 1638 bf->aphy = aphy;
1615 bf->bf_frmlen = skb->len + FCS_LEN; 1639 bf->bf_flags = setup_tx_flags(skb);
1616 /* Remove the padding size from bf_frmlen, if any */
1617 padpos = ath9k_cmn_padpos(hdr->frame_control);
1618 padsize = padpos & 3;
1619 if (padsize && skb->len>padpos+padsize) {
1620 bf->bf_frmlen -= padsize;
1621 }
1622
1623 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
1624 bf->bf_state.bf_type |= BUF_HT;
1625 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1626 use_ldpc = true;
1627 }
1628
1629 bf->bf_state.bfs_paprd = txctl->paprd;
1630 if (txctl->paprd)
1631 bf->bf_state.bfs_paprd_timestamp = jiffies;
1632 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
1633
1634 bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
1635 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1636 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1637 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1638 } else {
1639 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1640 }
1641
1642 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1643 (sc->sc_flags & SC_OP_TXAGGR))
1644 assign_aggr_tid_seqno(skb, bf);
1645
1646 bf->bf_mpdu = skb; 1640 bf->bf_mpdu = skb;
1647 1641
1648 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 1642 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
@@ -1652,40 +1646,17 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1652 bf->bf_buf_addr = 0; 1646 bf->bf_buf_addr = 0;
1653 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 1647 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1654 "dma_mapping_error() on TX\n"); 1648 "dma_mapping_error() on TX\n");
1655 return -ENOMEM; 1649 ath_tx_return_buffer(sc, bf);
1650 return NULL;
1656 } 1651 }
1657 1652
1658 bf->bf_tx_aborted = false;
1659
1660 return 0;
1661}
1662
1663/* FIXME: tx power */
1664static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1665 struct ath_tx_control *txctl)
1666{
1667 struct sk_buff *skb = bf->bf_mpdu;
1668 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1669 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1670 struct ath_node *an = NULL;
1671 struct list_head bf_head;
1672 struct ath_desc *ds;
1673 struct ath_atx_tid *tid;
1674 struct ath_hw *ah = sc->sc_ah;
1675 int frm_type;
1676 __le16 fc;
1677
1678 frm_type = get_hw_packet_type(skb); 1653 frm_type = get_hw_packet_type(skb);
1679 fc = hdr->frame_control;
1680
1681 INIT_LIST_HEAD(&bf_head);
1682 list_add_tail(&bf->list, &bf_head);
1683 1654
1684 ds = bf->bf_desc; 1655 ds = bf->bf_desc;
1685 ath9k_hw_set_desc_link(ah, ds, 0); 1656 ath9k_hw_set_desc_link(ah, ds, 0);
1686 1657
1687 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER, 1658 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1688 bf->bf_keyix, bf->bf_keytype, bf->bf_flags); 1659 fi->keyix, fi->keytype, bf->bf_flags);
1689 1660
1690 ath9k_hw_filltxdesc(ah, ds, 1661 ath9k_hw_filltxdesc(ah, ds,
1691 skb->len, /* segment length */ 1662 skb->len, /* segment length */
@@ -1693,42 +1664,50 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1693 true, /* last segment */ 1664 true, /* last segment */
1694 ds, /* first descriptor */ 1665 ds, /* first descriptor */
1695 bf->bf_buf_addr, 1666 bf->bf_buf_addr,
1696 txctl->txq->axq_qnum); 1667 txq->axq_qnum);
1697 1668
1698 if (bf->bf_state.bfs_paprd)
1699 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1700 1669
1701 spin_lock_bh(&txctl->txq->axq_lock); 1670 return bf;
1671}
1702 1672
1703 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && 1673/* FIXME: tx power */
1704 tx_info->control.sta) { 1674static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1705 an = (struct ath_node *)tx_info->control.sta->drv_priv; 1675 struct ath_tx_control *txctl)
1706 tid = ATH_AN_2_TID(an, bf->bf_tidno); 1676{
1677 struct sk_buff *skb = bf->bf_mpdu;
1678 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1679 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1680 struct list_head bf_head;
1681 struct ath_atx_tid *tid;
1682 u8 tidno;
1707 1683
1708 if (!ieee80211_is_data_qos(fc)) { 1684 spin_lock_bh(&txctl->txq->axq_lock);
1709 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1710 goto tx_done;
1711 }
1712 1685
1713 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 1686 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && txctl->an) {
1714 /* 1687 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1715 * Try aggregation if it's a unicast data frame 1688 IEEE80211_QOS_CTL_TID_MASK;
1716 * and the destination is HT capable. 1689 tid = ATH_AN_2_TID(txctl->an, tidno);
1717 */ 1690
1718 ath_tx_send_ampdu(sc, tid, &bf_head, txctl); 1691 WARN_ON(tid->ac->txq != txctl->txq);
1719 } else { 1692 /*
1720 /* 1693 * Try aggregation if it's a unicast data frame
1721 * Send this frame as regular when ADDBA 1694 * and the destination is HT capable.
1722 * exchange is neither complete nor pending. 1695 */
1723 */ 1696 ath_tx_send_ampdu(sc, tid, bf, txctl);
1724 ath_tx_send_ht_normal(sc, txctl->txq,
1725 tid, &bf_head);
1726 }
1727 } else { 1697 } else {
1728 ath_tx_send_normal(sc, txctl->txq, &bf_head); 1698 INIT_LIST_HEAD(&bf_head);
1699 list_add_tail(&bf->list, &bf_head);
1700
1701 bf->bf_state.bfs_ftype = txctl->frame_type;
1702 bf->bf_state.bfs_paprd = txctl->paprd;
1703
1704 if (bf->bf_state.bfs_paprd)
1705 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1706 bf->bf_state.bfs_paprd);
1707
1708 ath_tx_send_normal(sc, txctl->txq, NULL, &bf_head);
1729 } 1709 }
1730 1710
1731tx_done:
1732 spin_unlock_bh(&txctl->txq->axq_lock); 1711 spin_unlock_bh(&txctl->txq->axq_lock);
1733} 1712}
1734 1713
@@ -1736,66 +1715,20 @@ tx_done:
1736int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 1715int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1737 struct ath_tx_control *txctl) 1716 struct ath_tx_control *txctl)
1738{ 1717{
1718 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1719 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1720 struct ieee80211_sta *sta = info->control.sta;
1739 struct ath_wiphy *aphy = hw->priv; 1721 struct ath_wiphy *aphy = hw->priv;
1740 struct ath_softc *sc = aphy->sc; 1722 struct ath_softc *sc = aphy->sc;
1741 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1742 struct ath_txq *txq = txctl->txq; 1723 struct ath_txq *txq = txctl->txq;
1743 struct ath_buf *bf; 1724 struct ath_buf *bf;
1744 int q, r;
1745
1746 bf = ath_tx_get_buffer(sc);
1747 if (!bf) {
1748 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
1749 return -1;
1750 }
1751
1752 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
1753 if (unlikely(r)) {
1754 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
1755
1756 /* upon ath_tx_processq() this TX queue will be resumed, we
1757 * guarantee this will happen by knowing beforehand that
1758 * we will at least have to run TX completionon one buffer
1759 * on the queue */
1760 spin_lock_bh(&txq->axq_lock);
1761 if (!txq->stopped && txq->axq_depth > 1) {
1762 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1763 txq->stopped = 1;
1764 }
1765 spin_unlock_bh(&txq->axq_lock);
1766
1767 ath_tx_return_buffer(sc, bf);
1768
1769 return r;
1770 }
1771
1772 q = skb_get_queue_mapping(skb);
1773 if (q >= 4)
1774 q = 0;
1775
1776 spin_lock_bh(&txq->axq_lock);
1777 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1778 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1779 txq->stopped = 1;
1780 }
1781 spin_unlock_bh(&txq->axq_lock);
1782
1783 ath_tx_start_dma(sc, bf, txctl);
1784
1785 return 0;
1786}
1787
1788void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1789{
1790 struct ath_wiphy *aphy = hw->priv;
1791 struct ath_softc *sc = aphy->sc;
1792 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1793 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1794 int padpos, padsize; 1725 int padpos, padsize;
1795 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1726 int frmlen = skb->len + FCS_LEN;
1796 struct ath_tx_control txctl; 1727 int q;
1797 1728
1798 memset(&txctl, 0, sizeof(struct ath_tx_control)); 1729 txctl->an = (struct ath_node *)sta->drv_priv;
1730 if (info->control.hw_key)
1731 frmlen += info->control.hw_key->icv_len;
1799 1732
1800 /* 1733 /*
1801 * As a temporary workaround, assign seq# here; this will likely need 1734 * As a temporary workaround, assign seq# here; this will likely need
@@ -1812,30 +1745,37 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1812 /* Add the padding after the header if this is not already done */ 1745 /* Add the padding after the header if this is not already done */
1813 padpos = ath9k_cmn_padpos(hdr->frame_control); 1746 padpos = ath9k_cmn_padpos(hdr->frame_control);
1814 padsize = padpos & 3; 1747 padsize = padpos & 3;
1815 if (padsize && skb->len>padpos) { 1748 if (padsize && skb->len > padpos) {
1816 if (skb_headroom(skb) < padsize) { 1749 if (skb_headroom(skb) < padsize)
1817 ath_print(common, ATH_DBG_XMIT, 1750 return -ENOMEM;
1818 "TX CABQ padding failed\n"); 1751
1819 dev_kfree_skb_any(skb);
1820 return;
1821 }
1822 skb_push(skb, padsize); 1752 skb_push(skb, padsize);
1823 memmove(skb->data, skb->data + padsize, padpos); 1753 memmove(skb->data, skb->data + padsize, padpos);
1824 } 1754 }
1825 1755
1826 txctl.txq = sc->beacon.cabq; 1756 setup_frame_info(hw, skb, frmlen);
1757
1758 /*
1759 * At this point, the vif, hw_key and sta pointers in the tx control
1760	 * info are no longer valid (overwritten by the ath_frame_info data).
1761 */
1827 1762
1828 ath_print(common, ATH_DBG_XMIT, 1763 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
1829 "transmitting CABQ packet, skb: %p\n", skb); 1764 if (unlikely(!bf))
1765 return -ENOMEM;
1830 1766
1831 if (ath_tx_start(hw, skb, &txctl) != 0) { 1767 q = skb_get_queue_mapping(skb);
1832 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n"); 1768 spin_lock_bh(&txq->axq_lock);
1833 goto exit; 1769 if (txq == sc->tx.txq_map[q] &&
1770 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1771 ath_mac80211_stop_queue(sc, q);
1772 txq->stopped = 1;
1834 } 1773 }
1774 spin_unlock_bh(&txq->axq_lock);
1775
1776 ath_tx_start_dma(sc, bf, txctl);
1835 1777
1836 return; 1778 return 0;
1837exit:
1838 dev_kfree_skb_any(skb);
1839} 1779}
1840 1780
1841/*****************/ 1781/*****************/
@@ -1843,7 +1783,8 @@ exit:
1843/*****************/ 1783/*****************/
1844 1784
1845static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 1785static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1846 struct ath_wiphy *aphy, int tx_flags) 1786 struct ath_wiphy *aphy, int tx_flags, int ftype,
1787 struct ath_txq *txq)
1847{ 1788{
1848 struct ieee80211_hw *hw = sc->hw; 1789 struct ieee80211_hw *hw = sc->hw;
1849 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1790 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1886,15 +1827,16 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1886 PS_WAIT_FOR_TX_ACK)); 1827 PS_WAIT_FOR_TX_ACK));
1887 } 1828 }
1888 1829
1889 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) 1830 if (unlikely(ftype))
1890 ath9k_tx_status(hw, skb); 1831 ath9k_tx_status(hw, skb, ftype);
1891 else { 1832 else {
1892 q = skb_get_queue_mapping(skb); 1833 q = skb_get_queue_mapping(skb);
1893 if (q >= 4) 1834 if (txq == sc->tx.txq_map[q]) {
1894 q = 0; 1835 spin_lock_bh(&txq->axq_lock);
1895 1836 if (WARN_ON(--txq->pending_frames < 0))
1896 if (--sc->tx.pending_frames[q] < 0) 1837 txq->pending_frames = 0;
1897 sc->tx.pending_frames[q] = 0; 1838 spin_unlock_bh(&txq->axq_lock);
1839 }
1898 1840
1899 ieee80211_tx_status(hw, skb); 1841 ieee80211_tx_status(hw, skb);
1900 } 1842 }
@@ -1922,15 +1864,14 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1922 bf->bf_buf_addr = 0; 1864 bf->bf_buf_addr = 0;
1923 1865
1924 if (bf->bf_state.bfs_paprd) { 1866 if (bf->bf_state.bfs_paprd) {
1925 if (time_after(jiffies, 1867 if (!sc->paprd_pending)
1926 bf->bf_state.bfs_paprd_timestamp +
1927 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
1928 dev_kfree_skb_any(skb); 1868 dev_kfree_skb_any(skb);
1929 else 1869 else
1930 complete(&sc->paprd_complete); 1870 complete(&sc->paprd_complete);
1931 } else { 1871 } else {
1932 ath_debug_stat_tx(sc, txq, bf, ts); 1872 ath_debug_stat_tx(sc, bf, ts);
1933 ath_tx_complete(sc, skb, bf->aphy, tx_flags); 1873 ath_tx_complete(sc, skb, bf->aphy, tx_flags,
1874 bf->bf_state.bfs_ftype, txq);
1934 } 1875 }
1935 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't 1876 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
1936 * accidentally reference it later. 1877 * accidentally reference it later.
@@ -1945,42 +1886,15 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1945 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); 1886 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1946} 1887}
1947 1888
1948static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
1949 struct ath_tx_status *ts, int txok)
1950{
1951 u16 seq_st = 0;
1952 u32 ba[WME_BA_BMP_SIZE >> 5];
1953 int ba_index;
1954 int nbad = 0;
1955 int isaggr = 0;
1956
1957 if (bf->bf_lastbf->bf_tx_aborted)
1958 return 0;
1959
1960 isaggr = bf_isaggr(bf);
1961 if (isaggr) {
1962 seq_st = ts->ts_seqnum;
1963 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
1964 }
1965
1966 while (bf) {
1967 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1968 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1969 nbad++;
1970
1971 bf = bf->bf_next;
1972 }
1973
1974 return nbad;
1975}
1976
1977static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, 1889static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
1978 int nbad, int txok, bool update_rc) 1890 int nframes, int nbad, int txok, bool update_rc)
1979{ 1891{
1980 struct sk_buff *skb = bf->bf_mpdu; 1892 struct sk_buff *skb = bf->bf_mpdu;
1981 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1893 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1982 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1894 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1983 struct ieee80211_hw *hw = bf->aphy->hw; 1895 struct ieee80211_hw *hw = bf->aphy->hw;
1896 struct ath_softc *sc = bf->aphy->sc;
1897 struct ath_hw *ah = sc->sc_ah;
1984 u8 i, tx_rateindex; 1898 u8 i, tx_rateindex;
1985 1899
1986 if (txok) 1900 if (txok)
@@ -1994,22 +1908,32 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
1994 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) { 1908 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
1995 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 1909 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
1996 1910
1997 BUG_ON(nbad > bf->bf_nframes); 1911 BUG_ON(nbad > nframes);
1998 1912
1999 tx_info->status.ampdu_len = bf->bf_nframes; 1913 tx_info->status.ampdu_len = nframes;
2000 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad; 1914 tx_info->status.ampdu_ack_len = nframes - nbad;
2001 } 1915 }
2002 1916
2003 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && 1917 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2004 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { 1918 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
2005 if (ieee80211_is_data(hdr->frame_control)) { 1919 /*
2006 if (ts->ts_flags & 1920 * If an underrun error is seen assume it as an excessive
2007 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN)) 1921 * retry only if max frame trigger level has been reached
2008 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN; 1922 * (2 KB for single stream, and 4 KB for dual stream).
2009 if ((ts->ts_status & ATH9K_TXERR_XRETRY) || 1923 * Adjust the long retry as if the frame was tried
2010 (ts->ts_status & ATH9K_TXERR_FIFO)) 1924 * hw->max_rate_tries times to affect how rate control updates
2011 tx_info->pad[0] |= ATH_TX_INFO_XRETRY; 1925 * PER for the failed rate.
2012 } 1926 * In case of congestion on the bus penalizing this type of
1927 * underruns should help hardware actually transmit new frames
1928 * successfully by eventually preferring slower rates.
1929 * This itself should also alleviate congestion on the bus.
1930 */
1931 if (ieee80211_is_data(hdr->frame_control) &&
1932 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1933 ATH9K_TX_DELIM_UNDERRUN)) &&
1934 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1935 tx_info->status.rates[tx_rateindex].count =
1936 hw->max_rate_tries;
2013 } 1937 }
2014 1938
2015 for (i = tx_rateindex + 1; i < hw->max_rates; i++) { 1939 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
@@ -2020,16 +1944,13 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
2020 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; 1944 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
2021} 1945}
2022 1946
2023static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq) 1947static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
2024{ 1948{
2025 int qnum; 1949 struct ath_txq *txq;
2026
2027 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2028 if (qnum == -1)
2029 return;
2030 1950
1951 txq = sc->tx.txq_map[qnum];
2031 spin_lock_bh(&txq->axq_lock); 1952 spin_lock_bh(&txq->axq_lock);
2032 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) { 1953 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2033 if (ath_mac80211_start_queue(sc, qnum)) 1954 if (ath_mac80211_start_queue(sc, qnum))
2034 txq->stopped = 0; 1955 txq->stopped = 0;
2035 } 1956 }
@@ -2046,6 +1967,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2046 struct ath_tx_status ts; 1967 struct ath_tx_status ts;
2047 int txok; 1968 int txok;
2048 int status; 1969 int status;
1970 int qnum;
2049 1971
2050 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", 1972 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2051 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), 1973 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
@@ -2118,15 +2040,19 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2118 */ 2040 */
2119 if (ts.ts_status & ATH9K_TXERR_XRETRY) 2041 if (ts.ts_status & ATH9K_TXERR_XRETRY)
2120 bf->bf_state.bf_type |= BUF_XRETRY; 2042 bf->bf_state.bf_type |= BUF_XRETRY;
2121 ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true); 2043 ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
2122 } 2044 }
2123 2045
2046 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2047
2124 if (bf_isampdu(bf)) 2048 if (bf_isampdu(bf))
2125 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok); 2049 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2050 true);
2126 else 2051 else
2127 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0); 2052 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
2128 2053
2129 ath_wake_mac80211_queue(sc, txq); 2054 if (txq == sc->tx.txq_map[qnum])
2055 ath_wake_mac80211_queue(sc, qnum);
2130 2056
2131 spin_lock_bh(&txq->axq_lock); 2057 spin_lock_bh(&txq->axq_lock);
2132 if (sc->sc_flags & SC_OP_TXAGGR) 2058 if (sc->sc_flags & SC_OP_TXAGGR)
@@ -2196,6 +2122,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2196 struct list_head bf_head; 2122 struct list_head bf_head;
2197 int status; 2123 int status;
2198 int txok; 2124 int txok;
2125 int qnum;
2199 2126
2200 for (;;) { 2127 for (;;) {
2201 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs); 2128 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
@@ -2236,16 +2163,20 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2236 if (!bf_isampdu(bf)) { 2163 if (!bf_isampdu(bf)) {
2237 if (txs.ts_status & ATH9K_TXERR_XRETRY) 2164 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2238 bf->bf_state.bf_type |= BUF_XRETRY; 2165 bf->bf_state.bf_type |= BUF_XRETRY;
2239 ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true); 2166 ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
2240 } 2167 }
2241 2168
2169 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2170
2242 if (bf_isampdu(bf)) 2171 if (bf_isampdu(bf))
2243 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok); 2172 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2173 txok, true);
2244 else 2174 else
2245 ath_tx_complete_buf(sc, bf, txq, &bf_head, 2175 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2246 &txs, txok, 0); 2176 &txs, txok, 0);
2247 2177
2248 ath_wake_mac80211_queue(sc, txq); 2178 if (txq == sc->tx.txq_map[qnum])
2179 ath_wake_mac80211_queue(sc, qnum);
2249 2180
2250 spin_lock_bh(&txq->axq_lock); 2181 spin_lock_bh(&txq->axq_lock);
2251 if (!list_empty(&txq->txq_fifo_pending)) { 2182 if (!list_empty(&txq->txq_fifo_pending)) {
@@ -2377,7 +2308,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2377 for (acno = 0, ac = &an->ac[acno]; 2308 for (acno = 0, ac = &an->ac[acno];
2378 acno < WME_NUM_AC; acno++, ac++) { 2309 acno < WME_NUM_AC; acno++, ac++) {
2379 ac->sched = false; 2310 ac->sched = false;
2380 ac->qnum = sc->tx.hwq_map[acno]; 2311 ac->txq = sc->tx.txq_map[acno];
2381 INIT_LIST_HEAD(&ac->tid_q); 2312 INIT_LIST_HEAD(&ac->tid_q);
2382 } 2313 }
2383} 2314}
@@ -2387,17 +2318,13 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2387 struct ath_atx_ac *ac; 2318 struct ath_atx_ac *ac;
2388 struct ath_atx_tid *tid; 2319 struct ath_atx_tid *tid;
2389 struct ath_txq *txq; 2320 struct ath_txq *txq;
2390 int i, tidno; 2321 int tidno;
2391 2322
2392 for (tidno = 0, tid = &an->tid[tidno]; 2323 for (tidno = 0, tid = &an->tid[tidno];
2393 tidno < WME_NUM_TID; tidno++, tid++) { 2324 tidno < WME_NUM_TID; tidno++, tid++) {
2394 i = tid->ac->qnum;
2395
2396 if (!ATH_TXQ_SETUP(sc, i))
2397 continue;
2398 2325
2399 txq = &sc->tx.txq[i];
2400 ac = tid->ac; 2326 ac = tid->ac;
2327 txq = ac->txq;
2401 2328
2402 spin_lock_bh(&txq->axq_lock); 2329 spin_lock_bh(&txq->axq_lock);
2403 2330
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 6cf0c9ef47a..d07ff7f2fd9 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -48,7 +48,7 @@
48#include <linux/usb.h> 48#include <linux/usb.h>
49#ifdef CONFIG_CARL9170_LEDS 49#ifdef CONFIG_CARL9170_LEDS
50#include <linux/leds.h> 50#include <linux/leds.h>
51#endif /* CONFIG_CARL170_LEDS */ 51#endif /* CONFIG_CARL9170_LEDS */
52#ifdef CONFIG_CARL9170_WPC 52#ifdef CONFIG_CARL9170_WPC
53#include <linux/input.h> 53#include <linux/input.h>
54#endif /* CONFIG_CARL9170_WPC */ 54#endif /* CONFIG_CARL9170_WPC */
@@ -215,7 +215,7 @@ enum carl9170_restart_reasons {
215 CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS, 215 CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS,
216 CARL9170_RR_WATCHDOG, 216 CARL9170_RR_WATCHDOG,
217 CARL9170_RR_STUCK_TX, 217 CARL9170_RR_STUCK_TX,
218 CARL9170_RR_SLOW_SYSTEM, 218 CARL9170_RR_UNRESPONSIVE_DEVICE,
219 CARL9170_RR_COMMAND_TIMEOUT, 219 CARL9170_RR_COMMAND_TIMEOUT,
220 CARL9170_RR_TOO_MANY_PHY_ERRORS, 220 CARL9170_RR_TOO_MANY_PHY_ERRORS,
221 CARL9170_RR_LOST_RSP, 221 CARL9170_RR_LOST_RSP,
@@ -287,6 +287,7 @@ struct ar9170 {
287 287
288 /* reset / stuck frames/queue detection */ 288 /* reset / stuck frames/queue detection */
289 struct work_struct restart_work; 289 struct work_struct restart_work;
290 struct work_struct ping_work;
290 unsigned int restart_counter; 291 unsigned int restart_counter;
291 unsigned long queue_stop_timeout[__AR9170_NUM_TXQ]; 292 unsigned long queue_stop_timeout[__AR9170_NUM_TXQ];
292 unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ]; 293 unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ];
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
index c21f3364bfe..cdfc94c371b 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.c
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -41,7 +41,7 @@
41 41
42int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val) 42int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
43{ 43{
44 __le32 buf[2] = { 44 const __le32 buf[2] = {
45 cpu_to_le32(reg), 45 cpu_to_le32(reg),
46 cpu_to_le32(val), 46 cpu_to_le32(val),
47 }; 47 };
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index d552166db50..3680dfc70f4 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -97,13 +97,13 @@ struct carl9170_set_key_cmd {
97 __le16 type; 97 __le16 type;
98 u8 macAddr[6]; 98 u8 macAddr[6];
99 u32 key[4]; 99 u32 key[4];
100} __packed; 100} __packed __aligned(4);
101#define CARL9170_SET_KEY_CMD_SIZE 28 101#define CARL9170_SET_KEY_CMD_SIZE 28
102 102
103struct carl9170_disable_key_cmd { 103struct carl9170_disable_key_cmd {
104 __le16 user; 104 __le16 user;
105 __le16 padding; 105 __le16 padding;
106} __packed; 106} __packed __aligned(4);
107#define CARL9170_DISABLE_KEY_CMD_SIZE 4 107#define CARL9170_DISABLE_KEY_CMD_SIZE 4
108 108
109struct carl9170_u32_list { 109struct carl9170_u32_list {
@@ -206,7 +206,7 @@ struct carl9170_cmd {
206 struct carl9170_rx_filter_cmd rx_filter; 206 struct carl9170_rx_filter_cmd rx_filter;
207 u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN]; 207 u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
208 } __packed; 208 } __packed;
209} __packed; 209} __packed __aligned(4);
210 210
211#define CARL9170_TX_STATUS_QUEUE 3 211#define CARL9170_TX_STATUS_QUEUE 3
212#define CARL9170_TX_STATUS_QUEUE_S 0 212#define CARL9170_TX_STATUS_QUEUE_S 0
@@ -216,6 +216,7 @@ struct carl9170_cmd {
216#define CARL9170_TX_STATUS_TRIES (7 << CARL9170_TX_STATUS_TRIES_S) 216#define CARL9170_TX_STATUS_TRIES (7 << CARL9170_TX_STATUS_TRIES_S)
217#define CARL9170_TX_STATUS_SUCCESS 0x80 217#define CARL9170_TX_STATUS_SUCCESS 0x80
218 218
219#ifdef __CARL9170FW__
219/* 220/*
220 * NOTE: 221 * NOTE:
221 * Both structs [carl9170_tx_status and _carl9170_tx_status] 222 * Both structs [carl9170_tx_status and _carl9170_tx_status]
@@ -232,6 +233,8 @@ struct carl9170_tx_status {
232 u8 tries:3; 233 u8 tries:3;
233 u8 success:1; 234 u8 success:1;
234} __packed; 235} __packed;
236#endif /* __CARL9170FW__ */
237
235struct _carl9170_tx_status { 238struct _carl9170_tx_status {
236 /* 239 /*
237 * This version should be immune to all alignment bugs. 240 * This version should be immune to all alignment bugs.
@@ -272,13 +275,15 @@ struct carl9170_rsp {
272 struct carl9170_rf_init_result rf_init_res; 275 struct carl9170_rf_init_result rf_init_res;
273 struct carl9170_u32_list rreg_res; 276 struct carl9170_u32_list rreg_res;
274 struct carl9170_u32_list echo; 277 struct carl9170_u32_list echo;
278#ifdef __CARL9170FW__
275 struct carl9170_tx_status tx_status[0]; 279 struct carl9170_tx_status tx_status[0];
280#endif /* __CARL9170FW__ */
276 struct _carl9170_tx_status _tx_status[0]; 281 struct _carl9170_tx_status _tx_status[0];
277 struct carl9170_gpio gpio; 282 struct carl9170_gpio gpio;
278 struct carl9170_tsf_rsp tsf; 283 struct carl9170_tsf_rsp tsf;
279 struct carl9170_psm psm; 284 struct carl9170_psm psm;
280 u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN]; 285 u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
281 } __packed; 286 } __packed;
282} __packed; 287} __packed __aligned(4);
283 288
284#endif /* __CARL9170_SHARED_FWCMD_H */ 289#endif /* __CARL9170_SHARED_FWCMD_H */
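The __packed __aligned(4) annotations added above keep the wire layout byte-packed while restoring 4-byte alignment and size for the struct as a whole. A quick userspace check of that effect, written against the raw GCC attributes the kernel's __packed/__aligned macros expand to; the field layout is a simplified stand-in, not the real firmware interface:

/* Shows the difference between a plain packed struct and one that is
 * additionally aligned(4), as done for the carl9170 command structs. */
#include <stdint.h>
#include <stdio.h>

struct cmd_packed {
	uint16_t user;
	uint16_t padding;
	uint8_t  extra;
} __attribute__((packed));

struct cmd_packed_aligned {
	uint16_t user;
	uint16_t padding;
	uint8_t  extra;
} __attribute__((packed, aligned(4)));

int main(void)
{
	/* packed alone: no holes, but only 1-byte alignment */
	printf("packed:          size=%zu align=%zu\n",
	       sizeof(struct cmd_packed), _Alignof(struct cmd_packed));
	/* packed + aligned(4): still no holes between fields, yet the
	 * struct is padded to a multiple of 4 and placed on 4-byte
	 * boundaries, matching the 4-byte command stream units */
	printf("packed+aligned4: size=%zu align=%zu\n",
	       sizeof(struct cmd_packed_aligned),
	       _Alignof(struct cmd_packed_aligned));
	return 0;
}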
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index 2f471b3f05a..e85df6edfed 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -712,7 +712,8 @@ struct ar9170_stream {
712 __le16 tag; 712 __le16 tag;
713 713
714 u8 payload[0]; 714 u8 payload[0];
715}; 715} __packed __aligned(4);
716#define AR9170_STREAM_LEN 4
716 717
717#define AR9170_MAX_ACKTABLE_ENTRIES 8 718#define AR9170_MAX_ACKTABLE_ENTRIES 8
718#define AR9170_MAX_VIRTUAL_MAC 7 719#define AR9170_MAX_VIRTUAL_MAC 7
@@ -736,4 +737,8 @@ struct ar9170_stream {
736 737
737#define MOD_VAL(reg, value, newvalue) \ 738#define MOD_VAL(reg, value, newvalue) \
738 (((value) & ~reg) | (((newvalue) << reg##_S) & reg)) 739 (((value) & ~reg) | (((newvalue) << reg##_S) & reg))
740
741#define GET_VAL(reg, value) \
742 (((value) & reg) >> reg##_S)
743
739#endif /* __CARL9170_SHARED_HW_H */ 744#endif /* __CARL9170_SHARED_HW_H */
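GET_VAL added above is the read-side companion to MOD_VAL: given a REG mask plus its REG_S shift constant, it extracts the field from a register word. A standalone illustration using the same macro bodies; the mask/shift pair mirrors AR9170_PHY_CCA_MIN_PWR (0x0ff80000, shift 19) from phy.h below, while the register value itself is invented:

/* Field insert/extract with the MOD_VAL/GET_VAL macro pair. */
#include <stdio.h>

#define DEMO_CCA_MIN_PWR	0x0ff80000
#define DEMO_CCA_MIN_PWR_S	19

#define MOD_VAL(reg, value, newvalue) \
	(((value) & ~reg) | (((newvalue) << reg##_S) & reg))

#define GET_VAL(reg, value) \
	(((value) & reg) >> reg##_S)

int main(void)
{
	unsigned int word = 0;

	/* write a 9-bit field value of 0x1f3 into bits 27:19 */
	word = MOD_VAL(DEMO_CCA_MIN_PWR, word, 0x1f3u);
	printf("register word: 0x%08x\n", word);

	/* read it back out again */
	printf("field value:   0x%03x\n", GET_VAL(DEMO_CCA_MIN_PWR, word));
	return 0;
}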
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index 2305bc27151..385cf508479 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -205,8 +205,8 @@ int carl9170_init_mac(struct ar9170 *ar)
205 carl9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105); 205 carl9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);
206 206
207 /* Aggregation MAX number and timeout */ 207 /* Aggregation MAX number and timeout */
208 carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0xa); 208 carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0x8000a);
209 carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a00); 209 carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a07);
210 210
211 carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER, 211 carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
212 AR9170_MAC_FTF_DEFAULTS); 212 AR9170_MAC_FTF_DEFAULTS);
@@ -457,8 +457,9 @@ int carl9170_set_beacon_timers(struct ar9170 *ar)
457 457
458int carl9170_update_beacon(struct ar9170 *ar, const bool submit) 458int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
459{ 459{
460 struct sk_buff *skb; 460 struct sk_buff *skb = NULL;
461 struct carl9170_vif_info *cvif; 461 struct carl9170_vif_info *cvif;
462 struct ieee80211_tx_info *txinfo;
462 __le32 *data, *old = NULL; 463 __le32 *data, *old = NULL;
463 u32 word, off, addr, len; 464 u32 word, off, addr, len;
464 int i = 0, err = 0; 465 int i = 0, err = 0;
@@ -487,7 +488,13 @@ found:
487 488
488 if (!skb) { 489 if (!skb) {
489 err = -ENOMEM; 490 err = -ENOMEM;
490 goto out_unlock; 491 goto err_free;
492 }
493
494 txinfo = IEEE80211_SKB_CB(skb);
495 if (txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS) {
496 err = -EINVAL;
497 goto err_free;
491 } 498 }
492 499
493 spin_lock_bh(&ar->beacon_lock); 500 spin_lock_bh(&ar->beacon_lock);
@@ -504,11 +511,8 @@ found:
504 wiphy_err(ar->hw->wiphy, "beacon does not " 511 wiphy_err(ar->hw->wiphy, "beacon does not "
505 "fit into device memory!\n"); 512 "fit into device memory!\n");
506 } 513 }
507
508 spin_unlock_bh(&ar->beacon_lock);
509 dev_kfree_skb_any(skb);
510 err = -EINVAL; 514 err = -EINVAL;
511 goto out_unlock; 515 goto err_unlock;
512 } 516 }
513 517
514 if (len > AR9170_MAC_BCN_LENGTH_MAX) { 518 if (len > AR9170_MAC_BCN_LENGTH_MAX) {
@@ -518,22 +522,22 @@ found:
518 AR9170_MAC_BCN_LENGTH_MAX, len); 522 AR9170_MAC_BCN_LENGTH_MAX, len);
519 } 523 }
520 524
521 spin_unlock_bh(&ar->beacon_lock);
522 dev_kfree_skb_any(skb);
523 err = -EMSGSIZE; 525 err = -EMSGSIZE;
524 goto out_unlock; 526 goto err_unlock;
525 } 527 }
526 528
527 carl9170_async_regwrite_begin(ar); 529 i = txinfo->control.rates[0].idx;
530 if (txinfo->band != IEEE80211_BAND_2GHZ)
531 i += 4;
528 532
529 /* XXX: use skb->cb info */ 533 word = __carl9170_ratetable[i].hw_value & 0xf;
530 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) { 534 if (i < 4)
531 carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, 535 word |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
532 ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400); 536 else
533 } else { 537 word |= ((skb->len + FCS_LEN) << 16) + 0x0010;
534 carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, 538
535 ((skb->len + FCS_LEN) << 16) + 0x001b); 539 carl9170_async_regwrite_begin(ar);
536 } 540 carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, word);
537 541
538 for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) { 542 for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
539 /* 543 /*
@@ -557,7 +561,7 @@ found:
557 cvif->beacon = skb; 561 cvif->beacon = skb;
558 spin_unlock_bh(&ar->beacon_lock); 562 spin_unlock_bh(&ar->beacon_lock);
559 if (err) 563 if (err)
560 goto out_unlock; 564 goto err_free;
561 565
562 if (submit) { 566 if (submit) {
563 err = carl9170_bcn_ctrl(ar, cvif->id, 567 err = carl9170_bcn_ctrl(ar, cvif->id,
@@ -565,10 +569,18 @@ found:
565 addr, skb->len + FCS_LEN); 569 addr, skb->len + FCS_LEN);
566 570
567 if (err) 571 if (err)
568 goto out_unlock; 572 goto err_free;
569 } 573 }
570out_unlock: 574out_unlock:
571 rcu_read_unlock(); 575 rcu_read_unlock();
576 return 0;
577
578err_unlock:
579 spin_unlock_bh(&ar->beacon_lock);
580
581err_free:
582 rcu_read_unlock();
583 dev_kfree_skb_any(skb);
572 return err; 584 return err;
573} 585}
574 586
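The rewritten beacon path above derives the BCN_PLCP register word from the beacon rate and length instead of hard-coding the 1 Mbit/s and 6 Mbit/s cases. A worked, self-contained version of that computation; the beacon length is a sample number, and the rate codes 0x0 (CCK 1M) and 0xb (OFDM 6M) are the ones implied by the removed 0x0400 / 0x001b constants:

/* BCN_PLCP word: low nibble = hardware rate code, CCK rates place the
 * frame length at bit 19 with flag 0x0400, OFDM rates at bit 16 with
 * flag 0x0010, reproducing the removed hard-coded branches. */
#include <stdint.h>
#include <stdio.h>

#define FCS_LEN 4

static uint32_t bcn_plcp_word(unsigned int skb_len, unsigned int hw_rate, int is_cck)
{
	uint32_t word = hw_rate & 0xf;

	if (is_cck)
		word |= ((skb_len + FCS_LEN) << (3 + 16)) + 0x0400;
	else
		word |= ((skb_len + FCS_LEN) << 16) + 0x0010;

	return word;
}

int main(void)
{
	unsigned int len = 104;	/* arbitrary beacon length in bytes */

	printf("CCK  1M beacon: 0x%08x\n", (unsigned)bcn_plcp_word(len, 0x0, 1));
	printf("OFDM 6M beacon: 0x%08x\n", (unsigned)bcn_plcp_word(len, 0xb, 0));
	return 0;
}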
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index dc7b30b170d..870df8c4262 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -428,6 +428,7 @@ static void carl9170_cancel_worker(struct ar9170 *ar)
428 cancel_delayed_work_sync(&ar->led_work); 428 cancel_delayed_work_sync(&ar->led_work);
429#endif /* CONFIG_CARL9170_LEDS */ 429#endif /* CONFIG_CARL9170_LEDS */
430 cancel_work_sync(&ar->ps_work); 430 cancel_work_sync(&ar->ps_work);
431 cancel_work_sync(&ar->ping_work);
431 cancel_work_sync(&ar->ampdu_work); 432 cancel_work_sync(&ar->ampdu_work);
432} 433}
433 434
@@ -533,6 +534,21 @@ void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
533 */ 534 */
534} 535}
535 536
537static void carl9170_ping_work(struct work_struct *work)
538{
539 struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
540 int err;
541
542 if (!IS_STARTED(ar))
543 return;
544
545 mutex_lock(&ar->mutex);
546 err = carl9170_echo_test(ar, 0xdeadbeef);
547 if (err)
548 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
549 mutex_unlock(&ar->mutex);
550}
551
536static int carl9170_init_interface(struct ar9170 *ar, 552static int carl9170_init_interface(struct ar9170 *ar,
537 struct ieee80211_vif *vif) 553 struct ieee80211_vif *vif)
538{ 554{
@@ -1614,6 +1630,7 @@ void *carl9170_alloc(size_t priv_size)
1614 skb_queue_head_init(&ar->tx_pending[i]); 1630 skb_queue_head_init(&ar->tx_pending[i]);
1615 } 1631 }
1616 INIT_WORK(&ar->ps_work, carl9170_ps_work); 1632 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1633 INIT_WORK(&ar->ping_work, carl9170_ping_work);
1617 INIT_WORK(&ar->restart_work, carl9170_restart_work); 1634 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1618 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work); 1635 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1619 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor); 1636 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
@@ -1829,7 +1846,7 @@ int carl9170_register(struct ar9170 *ar)
1829 err = carl9170_led_register(ar); 1846 err = carl9170_led_register(ar);
1830 if (err) 1847 if (err)
1831 goto err_unreg; 1848 goto err_unreg;
1832#endif /* CONFIG_CAR9L170_LEDS */ 1849#endif /* CONFIG_CARL9170_LEDS */
1833 1850
1834#ifdef CONFIG_CARL9170_WPC 1851#ifdef CONFIG_CARL9170_WPC
1835 err = carl9170_register_wps_button(ar); 1852 err = carl9170_register_wps_button(ar);
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index 89deca37a98..82bc81c4c93 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -1554,15 +1554,6 @@ static int carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
1554 return carl9170_regwrite_result(); 1554 return carl9170_regwrite_result();
1555} 1555}
1556 1556
1557/* TODO: replace this with sign_extend32(noise, 8) */
1558static int carl9170_calc_noise_dbm(u32 raw_noise)
1559{
1560 if (raw_noise & 0x100)
1561 return ~0x1ff | raw_noise;
1562 else
1563 return raw_noise;
1564}
1565
1566int carl9170_get_noisefloor(struct ar9170 *ar) 1557int carl9170_get_noisefloor(struct ar9170 *ar)
1567{ 1558{
1568 static const u32 phy_regs[] = { 1559 static const u32 phy_regs[] = {
@@ -1578,11 +1569,11 @@ int carl9170_get_noisefloor(struct ar9170 *ar)
1578 return err; 1569 return err;
1579 1570
1580 for (i = 0; i < 2; i++) { 1571 for (i = 0; i < 2; i++) {
1581 ar->noise[i] = carl9170_calc_noise_dbm( 1572 ar->noise[i] = sign_extend32(GET_VAL(
1582 (phy_res[i] >> 19) & 0x1ff); 1573 AR9170_PHY_CCA_MIN_PWR, phy_res[i]), 8);
1583 1574
1584 ar->noise[i + 2] = carl9170_calc_noise_dbm( 1575 ar->noise[i + 2] = sign_extend32(GET_VAL(
1585 (phy_res[i + 2] >> 23) & 0x1ff); 1576 AR9170_PHY_EXT_CCA_MIN_PWR, phy_res[i + 2]), 8);
1586 } 1577 }
1587 1578
1588 return 0; 1579 return 0;
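The noise-floor conversion above now relies on the generic sign_extend32() helper instead of the open-coded carl9170_calc_noise_dbm(). A quick userspace check that the two agree for every 9-bit raw reading; sign_extend32() is reimplemented locally only so the comparison is self-contained:

/* Verifies that sign_extend32(raw, 8) matches the removed helper for
 * all 9-bit two's-complement noise readings. */
#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

static int calc_noise_dbm(uint32_t raw_noise)
{
	if (raw_noise & 0x100)
		return ~0x1ff | raw_noise;
	else
		return raw_noise;
}

int main(void)
{
	unsigned int raw;

	for (raw = 0; raw < 0x200; raw++) {
		if (calc_noise_dbm(raw) != sign_extend32(raw, 8)) {
			printf("mismatch at raw=0x%03x\n", raw);
			return 1;
		}
	}
	printf("identical for all 9-bit inputs, e.g. 0x1a6 -> %d dBm\n",
	       sign_extend32(0x1a6, 8));
	return 0;
}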
diff --git a/drivers/net/wireless/ath/carl9170/phy.h b/drivers/net/wireless/ath/carl9170/phy.h
index 02c34eb4ebd..024fb42bc78 100644
--- a/drivers/net/wireless/ath/carl9170/phy.h
+++ b/drivers/net/wireless/ath/carl9170/phy.h
@@ -139,8 +139,8 @@
139#define AR9170_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000 139#define AR9170_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
140 140
141#define AR9170_PHY_REG_CCA (AR9170_PHY_REG_BASE + 0x0064) 141#define AR9170_PHY_REG_CCA (AR9170_PHY_REG_BASE + 0x0064)
142#define AR9170_PHY_CCA_MINCCA_PWR 0x0ff80000 142#define AR9170_PHY_CCA_MIN_PWR 0x0ff80000
143#define AR9170_PHY_CCA_MINCCA_PWR_S 19 143#define AR9170_PHY_CCA_MIN_PWR_S 19
144#define AR9170_PHY_CCA_THRESH62 0x0007f000 144#define AR9170_PHY_CCA_THRESH62 0x0007f000
145#define AR9170_PHY_CCA_THRESH62_S 12 145#define AR9170_PHY_CCA_THRESH62_S 12
146 146
@@ -338,8 +338,8 @@
338#define AR9170_PHY_EXT_CCA_CYCPWR_THR1_S 9 338#define AR9170_PHY_EXT_CCA_CYCPWR_THR1_S 9
339#define AR9170_PHY_EXT_CCA_THRESH62 0x007f0000 339#define AR9170_PHY_EXT_CCA_THRESH62 0x007f0000
340#define AR9170_PHY_EXT_CCA_THRESH62_S 16 340#define AR9170_PHY_EXT_CCA_THRESH62_S 16
341#define AR9170_PHY_EXT_MINCCA_PWR 0xff800000 341#define AR9170_PHY_EXT_CCA_MIN_PWR 0xff800000
342#define AR9170_PHY_EXT_MINCCA_PWR_S 23 342#define AR9170_PHY_EXT_CCA_MIN_PWR_S 23
343 343
344#define AR9170_PHY_REG_SFCORR_EXT (AR9170_PHY_REG_BASE + 0x01c0) 344#define AR9170_PHY_REG_SFCORR_EXT (AR9170_PHY_REG_BASE + 0x01c0)
345#define AR9170_PHY_SFCORR_EXT_M1_THRESH 0x0000007f 345#define AR9170_PHY_SFCORR_EXT_M1_THRESH 0x0000007f
@@ -546,19 +546,19 @@
546#define AR9170_PHY_FORCE_XPA_CFG_S 0 546#define AR9170_PHY_FORCE_XPA_CFG_S 0
547 547
548#define AR9170_PHY_REG_CH1_CCA (AR9170_PHY_REG_BASE + 0x1064) 548#define AR9170_PHY_REG_CH1_CCA (AR9170_PHY_REG_BASE + 0x1064)
549#define AR9170_PHY_CH1_MINCCA_PWR 0x0ff80000 549#define AR9170_PHY_CH1_CCA_MIN_PWR 0x0ff80000
550#define AR9170_PHY_CH1_MINCCA_PWR_S 19 550#define AR9170_PHY_CH1_CCA_MIN_PWR_S 19
551 551
552#define AR9170_PHY_REG_CH2_CCA (AR9170_PHY_REG_BASE + 0x2064) 552#define AR9170_PHY_REG_CH2_CCA (AR9170_PHY_REG_BASE + 0x2064)
553#define AR9170_PHY_CH2_MINCCA_PWR 0x0ff80000 553#define AR9170_PHY_CH2_CCA_MIN_PWR 0x0ff80000
554#define AR9170_PHY_CH2_MINCCA_PWR_S 19 554#define AR9170_PHY_CH2_CCA_MIN_PWR_S 19
555 555
556#define AR9170_PHY_REG_CH1_EXT_CCA (AR9170_PHY_REG_BASE + 0x11bc) 556#define AR9170_PHY_REG_CH1_EXT_CCA (AR9170_PHY_REG_BASE + 0x11bc)
557#define AR9170_PHY_CH1_EXT_MINCCA_PWR 0xff800000 557#define AR9170_PHY_CH1_EXT_CCA_MIN_PWR 0xff800000
558#define AR9170_PHY_CH1_EXT_MINCCA_PWR_S 23 558#define AR9170_PHY_CH1_EXT_CCA_MIN_PWR_S 23
559 559
560#define AR9170_PHY_REG_CH2_EXT_CCA (AR9170_PHY_REG_BASE + 0x21bc) 560#define AR9170_PHY_REG_CH2_EXT_CCA (AR9170_PHY_REG_BASE + 0x21bc)
561#define AR9170_PHY_CH2_EXT_MINCCA_PWR 0xff800000 561#define AR9170_PHY_CH2_EXT_CCA_MIN_PWR 0xff800000
562#define AR9170_PHY_CH2_EXT_MINCCA_PWR_S 23 562#define AR9170_PHY_CH2_EXT_CCA_MIN_PWR_S 23
563 563
564#endif /* __CARL9170_SHARED_PHY_H */ 564#endif /* __CARL9170_SHARED_PHY_H */
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 7e6506a77bb..6cc58e052d1 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -242,9 +242,11 @@ static void carl9170_tx_release(struct kref *ref)
242 ar->tx_ampdu_schedule = true; 242 ar->tx_ampdu_schedule = true;
243 243
244 if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) { 244 if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
245 txinfo->status.ampdu_len = txinfo->pad[0]; 245 struct _carl9170_tx_superframe *super;
246 txinfo->status.ampdu_ack_len = txinfo->pad[1]; 246
247 txinfo->pad[0] = txinfo->pad[1] = 0; 247 super = (void *)skb->data;
248 txinfo->status.ampdu_len = super->s.rix;
249 txinfo->status.ampdu_ack_len = super->s.cnt;
248 } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) { 250 } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
249 /* 251 /*
250 * drop redundant tx_status reports: 252 * drop redundant tx_status reports:
@@ -337,7 +339,8 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
337 u8 tid; 339 u8 tid;
338 340
339 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) || 341 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
340 txinfo->flags & IEEE80211_TX_CTL_INJECTED) 342 txinfo->flags & IEEE80211_TX_CTL_INJECTED ||
343 (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
341 return; 344 return;
342 345
343 tx_info = IEEE80211_SKB_CB(skb); 346 tx_info = IEEE80211_SKB_CB(skb);
@@ -389,8 +392,8 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
389 sta_info->stats[tid].ampdu_ack_len++; 392 sta_info->stats[tid].ampdu_ack_len++;
390 393
391 if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) { 394 if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
392 txinfo->pad[0] = sta_info->stats[tid].ampdu_len; 395 super->s.rix = sta_info->stats[tid].ampdu_len;
393 txinfo->pad[1] = sta_info->stats[tid].ampdu_ack_len; 396 super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
394 txinfo->flags |= IEEE80211_TX_STAT_AMPDU; 397 txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
395 sta_info->stats[tid].clear = true; 398 sta_info->stats[tid].clear = true;
396 } 399 }
@@ -524,6 +527,59 @@ next:
524 } 527 }
525} 528}
526 529
530static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
531{
532 struct carl9170_sta_tid *iter;
533 struct sk_buff *skb;
534 struct ieee80211_tx_info *txinfo;
535 struct carl9170_tx_info *arinfo;
536 struct _carl9170_tx_superframe *super;
537 struct ieee80211_sta *sta;
538 struct ieee80211_vif *vif;
539 struct ieee80211_hdr *hdr;
540 unsigned int vif_id;
541
542 rcu_read_lock();
543 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
544 if (iter->state < CARL9170_TID_STATE_IDLE)
545 continue;
546
547 spin_lock_bh(&iter->lock);
548 skb = skb_peek(&iter->queue);
549 if (!skb)
550 goto unlock;
551
552 txinfo = IEEE80211_SKB_CB(skb);
553 arinfo = (void *)txinfo->rate_driver_data;
554 if (time_is_after_jiffies(arinfo->timeout +
555 msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
556 goto unlock;
557
558 super = (void *) skb->data;
559 hdr = (void *) super->frame_data;
560
561 vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
562 CARL9170_TX_SUPER_MISC_VIF_ID_S;
563
564 if (WARN_ON(vif_id >= AR9170_MAX_VIRTUAL_MAC))
565 goto unlock;
566
567 vif = rcu_dereference(ar->vif_priv[vif_id].vif);
568 if (WARN_ON(!vif))
569 goto unlock;
570
571 sta = ieee80211_find_sta(vif, hdr->addr1);
572 if (WARN_ON(!sta))
573 goto unlock;
574
575 ieee80211_stop_tx_ba_session(sta, iter->tid);
576unlock:
577 spin_unlock_bh(&iter->lock);
578
579 }
580 rcu_read_unlock();
581}
582
527void carl9170_tx_janitor(struct work_struct *work) 583void carl9170_tx_janitor(struct work_struct *work)
528{ 584{
529 struct ar9170 *ar = container_of(work, struct ar9170, 585 struct ar9170 *ar = container_of(work, struct ar9170,
@@ -534,6 +590,7 @@ void carl9170_tx_janitor(struct work_struct *work)
534 ar->tx_janitor_last_run = jiffies; 590 ar->tx_janitor_last_run = jiffies;
535 591
536 carl9170_check_queue_stop_timeout(ar); 592 carl9170_check_queue_stop_timeout(ar);
593 carl9170_tx_ampdu_timeout(ar);
537 594
538 if (!atomic_read(&ar->tx_total_queued)) 595 if (!atomic_read(&ar->tx_total_queued))
539 return; 596 return;
@@ -842,10 +899,8 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
842 if (unlikely(!sta || !cvif)) 899 if (unlikely(!sta || !cvif))
843 goto err_out; 900 goto err_out;
844 901
845 factor = min_t(unsigned int, 1u, 902 factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
846 info->control.sta->ht_cap.ampdu_factor); 903 density = sta->ht_cap.ampdu_density;
847
848 density = info->control.sta->ht_cap.ampdu_density;
849 904
850 if (density) { 905 if (density) {
851 /* 906 /*
@@ -1206,6 +1261,7 @@ static void carl9170_tx(struct ar9170 *ar)
1206static bool carl9170_tx_ampdu_queue(struct ar9170 *ar, 1261static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
1207 struct ieee80211_sta *sta, struct sk_buff *skb) 1262 struct ieee80211_sta *sta, struct sk_buff *skb)
1208{ 1263{
1264 struct _carl9170_tx_superframe *super = (void *) skb->data;
1209 struct carl9170_sta_info *sta_info; 1265 struct carl9170_sta_info *sta_info;
1210 struct carl9170_sta_tid *agg; 1266 struct carl9170_sta_tid *agg;
1211 struct sk_buff *iter; 1267 struct sk_buff *iter;
@@ -1274,6 +1330,7 @@ err_unlock:
1274 1330
1275err_unlock_rcu: 1331err_unlock_rcu:
1276 rcu_read_unlock(); 1332 rcu_read_unlock();
1333 super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR);
1277 carl9170_tx_status(ar, skb, false); 1334 carl9170_tx_status(ar, skb, false);
1278 ar->tx_dropped++; 1335 ar->tx_dropped++;
1279 return false; 1336 return false;
@@ -1302,9 +1359,6 @@ int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1302 */ 1359 */
1303 1360
1304 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 1361 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1305 if (WARN_ON_ONCE(!sta))
1306 goto err_free;
1307
1308 run = carl9170_tx_ampdu_queue(ar, sta, skb); 1362 run = carl9170_tx_ampdu_queue(ar, sta, skb);
1309 if (run) 1363 if (run)
1310 carl9170_tx_ampdu(ar); 1364 carl9170_tx_ampdu(ar);
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 7504ed14c72..a268053e18e 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -433,7 +433,7 @@ static void carl9170_usb_rx_complete(struct urb *urb)
433 * device. 433 * device.
434 */ 434 */
435 435
436 carl9170_restart(ar, CARL9170_RR_SLOW_SYSTEM); 436 ieee80211_queue_work(ar->hw, &ar->ping_work);
437 } 437 }
438 } else { 438 } else {
439 /* 439 /*
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index ff53f078a0b..ee0f84f2a2f 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
1#ifndef __CARL9170_SHARED_VERSION_H 1#ifndef __CARL9170_SHARED_VERSION_H
2#define __CARL9170_SHARED_VERSION_H 2#define __CARL9170_SHARED_VERSION_H
3#define CARL9170FW_VERSION_YEAR 10 3#define CARL9170FW_VERSION_YEAR 10
4#define CARL9170FW_VERSION_MONTH 9 4#define CARL9170FW_VERSION_MONTH 10
5#define CARL9170FW_VERSION_DAY 28 5#define CARL9170FW_VERSION_DAY 29
6#define CARL9170FW_VERSION_GIT "1.8.8.3" 6#define CARL9170FW_VERSION_GIT "1.9.0"
7#endif /* __CARL9170_SHARED_VERSION_H */ 7#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
index dacfb234f49..a9600ba8cea 100644
--- a/drivers/net/wireless/ath/debug.c
+++ b/drivers/net/wireless/ath/debug.c
@@ -19,14 +19,19 @@
19 19
20void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...) 20void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
21{ 21{
22 struct va_format vaf;
22 va_list args; 23 va_list args;
23 24
24 if (likely(!(common->debug_mask & dbg_mask))) 25 if (likely(!(common->debug_mask & dbg_mask)))
25 return; 26 return;
26 27
27 va_start(args, fmt); 28 va_start(args, fmt);
28 printk(KERN_DEBUG "ath: "); 29
29 vprintk(fmt, args); 30 vaf.fmt = fmt;
31 vaf.va = &args;
32
33 printk(KERN_DEBUG "ath: %pV", &vaf);
34
30 va_end(args); 35 va_end(args);
31} 36}
32EXPORT_SYMBOL(ath_print); 37EXPORT_SYMBOL(ath_print);
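The struct va_format/%pV conversion above emits the "ath: " prefix and the formatted message in a single printk() instead of two calls that could interleave. %pV is a kernel-only extension; the same single-call idea in plain C looks roughly like the sketch below, using vsnprintf and an intermediate buffer (buffer size and prefix are arbitrary choices for the sketch):

/* Userspace analogue of the single-printk pattern: format the caller's
 * message first, then write prefix + payload in one call so concurrent
 * output cannot split them. The kernel avoids the temporary buffer via
 * %pV and struct va_format. */
#include <stdarg.h>
#include <stdio.h>

static void ath_dbg_print(const char *fmt, ...)
{
	char msg[256];
	va_list args;

	va_start(args, fmt);
	vsnprintf(msg, sizeof(msg), fmt, args);
	va_end(args);

	/* one write for prefix and payload, not two */
	fprintf(stderr, "ath: %s", msg);
}

int main(void)
{
	ath_dbg_print("tx queue %d (%x), link %p\n", 3, 0x20, (void *)0);
	return 0;
}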
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index 64e4af2c288..f207007ee39 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -70,11 +70,13 @@ enum ATH_DEBUG {
70#ifdef CONFIG_ATH_DEBUG 70#ifdef CONFIG_ATH_DEBUG
71void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...) 71void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
72 __attribute__ ((format (printf, 3, 4))); 72 __attribute__ ((format (printf, 3, 4)));
73#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
73#else 74#else
74static inline void __attribute__ ((format (printf, 3, 4))) 75static inline void __attribute__ ((format (printf, 3, 4)))
75ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...) 76ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
76{ 77{
77} 78}
79#define ATH_DBG_WARN(foo, arg)
78#endif /* CONFIG_ATH_DEBUG */ 80#endif /* CONFIG_ATH_DEBUG */
79 81
80/** Returns string describing opmode, or NULL if unknown mode. */ 82/** Returns string describing opmode, or NULL if unknown mode. */
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index bd21a4d8208..62e3dac8f92 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -67,7 +67,8 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
67} 67}
68EXPORT_SYMBOL(ath_hw_keyreset); 68EXPORT_SYMBOL(ath_hw_keyreset);
69 69
70bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac) 70static bool ath_hw_keysetmac(struct ath_common *common,
71 u16 entry, const u8 *mac)
71{ 72{
72 u32 macHi, macLo; 73 u32 macHi, macLo;
73 u32 unicast_flag = AR_KEYTABLE_VALID; 74 u32 unicast_flag = AR_KEYTABLE_VALID;
@@ -107,9 +108,9 @@ bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
107 return true; 108 return true;
108} 109}
109 110
110bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry, 111static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
111 const struct ath_keyval *k, 112 const struct ath_keyval *k,
112 const u8 *mac) 113 const u8 *mac)
113{ 114{
114 void *ah = common->ah; 115 void *ah = common->ah;
115 u32 key0, key1, key2, key3, key4; 116 u32 key0, key1, key2, key3, key4;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index c8f7090b27d..46e382ed46a 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1161,7 +1161,7 @@ static irqreturn_t service_interrupt(int irq, void *dev_id)
1161 struct atmel_private *priv = netdev_priv(dev); 1161 struct atmel_private *priv = netdev_priv(dev);
1162 u8 isr; 1162 u8 isr;
1163 int i = -1; 1163 int i = -1;
1164 static u8 irq_order[] = { 1164 static const u8 irq_order[] = {
1165 ISR_OUT_OF_RANGE, 1165 ISR_OUT_OF_RANGE,
1166 ISR_RxCOMPLETE, 1166 ISR_RxCOMPLETE,
1167 ISR_TxCOMPLETE, 1167 ISR_TxCOMPLETE,
@@ -3771,7 +3771,9 @@ static int probe_atmel_card(struct net_device *dev)
3771 3771
3772 if (rc) { 3772 if (rc) {
3773 if (dev->dev_addr[0] == 0xFF) { 3773 if (dev->dev_addr[0] == 0xFF) {
3774 u8 default_mac[] = {0x00, 0x04, 0x25, 0x00, 0x00, 0x00}; 3774 static const u8 default_mac[] = {
3775 0x00, 0x04, 0x25, 0x00, 0x00, 0x00
3776 };
3775 printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name); 3777 printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
3776 memcpy(dev->dev_addr, default_mac, 6); 3778 memcpy(dev->dev_addr, default_mac, 6);
3777 } 3779 }
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 72821c456b0..9aad2ca3c11 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -153,6 +153,19 @@
153#define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna 153#define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna
154 * with bluetooth */ 154 * with bluetooth */
155 155
156/* SPROM boardflags2_lo values */
157#define B43_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */
158#define B43_BFL2_APLL_WAR 0x0002 /* alternative A-band PLL settings implemented */
159#define B43_BFL2_TXPWRCTRL_EN 0x0004 /* permits enabling TX Power Control */
160#define B43_BFL2_2X4_DIV 0x0008 /* 2x4 diversity switch */
161#define B43_BFL2_5G_PWRGAIN 0x0010 /* supports 5G band power gain */
162#define B43_BFL2_PCIEWAR_OVR 0x0020 /* overrides ASPM and Clkreq settings */
163#define B43_BFL2_CAESERS_BRD 0x0040 /* is Caesers board (unused) */
164#define B43_BFL2_BTC3WIRE 0x0080 /* uses 3-wire bluetooth coexist */
165#define B43_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */
166#define B43_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */
167#define B43_BFL2_GPLL_WAR 0x0400 /* alternative G-band PLL settings implemented */
168
156/* GPIO register offset, in both ChipCommon and PCI core. */ 169/* GPIO register offset, in both ChipCommon and PCI core. */
157#define B43_GPIO_CONTROL 0x6c 170#define B43_GPIO_CONTROL 0x6c
158 171
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 10d0aaf754c..3d5566e7af0 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -415,11 +415,6 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
415 415
416static void free_ringmemory(struct b43_dmaring *ring) 416static void free_ringmemory(struct b43_dmaring *ring)
417{ 417{
418 gfp_t flags = GFP_KERNEL;
419
420 if (ring->type == B43_DMA_64BIT)
421 flags |= GFP_DMA;
422
423 dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, 418 dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
424 ring->descbase, ring->dmabase); 419 ring->descbase, ring->dmabase);
425} 420}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index a1186525c70..fa488036658 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -322,59 +322,83 @@ static int b43_ratelimit(struct b43_wl *wl)
322 322
323void b43info(struct b43_wl *wl, const char *fmt, ...) 323void b43info(struct b43_wl *wl, const char *fmt, ...)
324{ 324{
325 struct va_format vaf;
325 va_list args; 326 va_list args;
326 327
327 if (b43_modparam_verbose < B43_VERBOSITY_INFO) 328 if (b43_modparam_verbose < B43_VERBOSITY_INFO)
328 return; 329 return;
329 if (!b43_ratelimit(wl)) 330 if (!b43_ratelimit(wl))
330 return; 331 return;
332
331 va_start(args, fmt); 333 va_start(args, fmt);
332 printk(KERN_INFO "b43-%s: ", 334
333 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 335 vaf.fmt = fmt;
334 vprintk(fmt, args); 336 vaf.va = &args;
337
338 printk(KERN_INFO "b43-%s: %pV",
339 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
340
335 va_end(args); 341 va_end(args);
336} 342}
337 343
338void b43err(struct b43_wl *wl, const char *fmt, ...) 344void b43err(struct b43_wl *wl, const char *fmt, ...)
339{ 345{
346 struct va_format vaf;
340 va_list args; 347 va_list args;
341 348
342 if (b43_modparam_verbose < B43_VERBOSITY_ERROR) 349 if (b43_modparam_verbose < B43_VERBOSITY_ERROR)
343 return; 350 return;
344 if (!b43_ratelimit(wl)) 351 if (!b43_ratelimit(wl))
345 return; 352 return;
353
346 va_start(args, fmt); 354 va_start(args, fmt);
347 printk(KERN_ERR "b43-%s ERROR: ", 355
348 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 356 vaf.fmt = fmt;
349 vprintk(fmt, args); 357 vaf.va = &args;
358
359 printk(KERN_ERR "b43-%s ERROR: %pV",
360 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
361
350 va_end(args); 362 va_end(args);
351} 363}
352 364
353void b43warn(struct b43_wl *wl, const char *fmt, ...) 365void b43warn(struct b43_wl *wl, const char *fmt, ...)
354{ 366{
367 struct va_format vaf;
355 va_list args; 368 va_list args;
356 369
357 if (b43_modparam_verbose < B43_VERBOSITY_WARN) 370 if (b43_modparam_verbose < B43_VERBOSITY_WARN)
358 return; 371 return;
359 if (!b43_ratelimit(wl)) 372 if (!b43_ratelimit(wl))
360 return; 373 return;
374
361 va_start(args, fmt); 375 va_start(args, fmt);
362 printk(KERN_WARNING "b43-%s warning: ", 376
363 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 377 vaf.fmt = fmt;
364 vprintk(fmt, args); 378 vaf.va = &args;
379
380 printk(KERN_WARNING "b43-%s warning: %pV",
381 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
382
365 va_end(args); 383 va_end(args);
366} 384}
367 385
368void b43dbg(struct b43_wl *wl, const char *fmt, ...) 386void b43dbg(struct b43_wl *wl, const char *fmt, ...)
369{ 387{
388 struct va_format vaf;
370 va_list args; 389 va_list args;
371 390
372 if (b43_modparam_verbose < B43_VERBOSITY_DEBUG) 391 if (b43_modparam_verbose < B43_VERBOSITY_DEBUG)
373 return; 392 return;
393
374 va_start(args, fmt); 394 va_start(args, fmt);
375 printk(KERN_DEBUG "b43-%s debug: ", 395
376 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 396 vaf.fmt = fmt;
377 vprintk(fmt, args); 397 vaf.va = &args;
398
399 printk(KERN_DEBUG "b43-%s debug: %pV",
400 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
401
378 va_end(args); 402 va_end(args);
379} 403}
380 404
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 7b2ea678145..fa7f83fc8db 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -427,9 +427,11 @@ void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
427/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */ 427/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */
428struct b43_c32 b43_cordic(int theta) 428struct b43_c32 b43_cordic(int theta)
429{ 429{
430 u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304, 430 static const u32 arctg[] = {
431 58666, 29335, 14668, 7334, 3667, 1833, 917, 458, 431 2949120, 1740967, 919879, 466945, 234379, 117304,
432 229, 115, 57, 29, }; 432 58666, 29335, 14668, 7334, 3667, 1833,
433 917, 458, 229, 115, 57, 29,
434 };
433 u8 i; 435 u8 i;
434 s32 tmp; 436 s32 tmp;
435 s8 signx = 1; 437 s8 signx = 1;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index e0f2d122e12..9769483156e 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -191,7 +191,8 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
191 binfo->type != 0x46D || 191 binfo->type != 0x46D ||
192 binfo->rev < 0x41); 192 binfo->rev < 0x41);
193 else 193 else
194 workaround = ((sprom->boardflags_hi & B43_BFH_NOPA) == 0); 194 workaround =
195 !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS);
195 196
196 b43_radio_mask(dev, B2055_MASTER1, 0xFFF3); 197 b43_radio_mask(dev, B2055_MASTER1, 0xFFF3);
197 if (workaround) { 198 if (workaround) {
@@ -240,10 +241,13 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
240static void b43_radio_init2055(struct b43_wldev *dev) 241static void b43_radio_init2055(struct b43_wldev *dev)
241{ 242{
242 b43_radio_init2055_pre(dev); 243 b43_radio_init2055_pre(dev);
243 if (b43_status(dev) < B43_STAT_INITIALIZED) 244 if (b43_status(dev) < B43_STAT_INITIALIZED) {
244 b2055_upload_inittab(dev, 0, 1); 245 /* Follow wl, not specs. Do not force uploading all regs */
245 else 246 b2055_upload_inittab(dev, 0, 0);
246 b2055_upload_inittab(dev, 0/*FIXME on 5ghz band*/, 0); 247 } else {
248 bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ;
249 b2055_upload_inittab(dev, ghz5, 0);
250 }
247 b43_radio_init2055_post(dev); 251 b43_radio_init2055_post(dev);
248} 252}
249 253
@@ -569,7 +573,6 @@ static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
569 ii = est.i1_pwr; 573 ii = est.i1_pwr;
570 qq = est.q1_pwr; 574 qq = est.q1_pwr;
571 } else { 575 } else {
572 B43_WARN_ON(1);
573 continue; 576 continue;
574 } 577 }
575 578
@@ -651,7 +654,8 @@ static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
651} 654}
652 655
653/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ 656/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
654static void b43_nphy_write_clip_detection(struct b43_wldev *dev, u16 *clip_st) 657static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
658 const u16 *clip_st)
655{ 659{
656 b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]); 660 b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
657 b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]); 661 b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
@@ -727,7 +731,7 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
727 struct b43_phy_n *nphy = phy->n; 731 struct b43_phy_n *nphy = phy->n;
728 732
729 if (enable) { 733 if (enable) {
730 u16 clip[] = { 0xFFFF, 0xFFFF }; 734 static const u16 clip[] = { 0xFFFF, 0xFFFF };
731 if (nphy->deaf_count++ == 0) { 735 if (nphy->deaf_count++ == 0) {
732 nphy->classifier_state = b43_nphy_classifier(dev, 0, 0); 736 nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
733 b43_nphy_classifier(dev, 0x7, 0); 737 b43_nphy_classifier(dev, 0x7, 0);
@@ -839,7 +843,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
839 u16 data[4]; 843 u16 data[4];
840 s16 gain[2]; 844 s16 gain[2];
841 u16 minmax[2]; 845 u16 minmax[2];
842 u16 lna_gain[4] = { -2, 10, 19, 25 }; 846 static const u16 lna_gain[4] = { -2, 10, 19, 25 };
843 847
844 if (nphy->hang_avoid) 848 if (nphy->hang_avoid)
845 b43_nphy_stay_in_carrier_search(dev, 1); 849 b43_nphy_stay_in_carrier_search(dev, 1);
@@ -871,7 +875,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
871 data[2] = lna_gain[2] + gain[i]; 875 data[2] = lna_gain[2] + gain[i];
872 data[3] = lna_gain[3] + gain[i]; 876 data[3] = lna_gain[3] + gain[i];
873 } 877 }
874 b43_ntab_write_bulk(dev, B43_NTAB16(10, 8), 4, data); 878 b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data);
875 879
876 minmax[i] = 23 + gain[i]; 880 minmax[i] = 23 + gain[i];
877 } 881 }
@@ -891,6 +895,7 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
891 struct b43_phy_n *nphy = dev->phy.n; 895 struct b43_phy_n *nphy = dev->phy.n;
892 u8 i, j; 896 u8 i, j;
893 u8 code; 897 u8 code;
898 u16 tmp;
894 899
895 /* TODO: for PHY >= 3 900 /* TODO: for PHY >= 3
896 s8 *lna1_gain, *lna2_gain; 901 s8 *lna1_gain, *lna2_gain;
@@ -913,15 +918,15 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
913 B43_NPHY_C2_CGAINI_CL2DETECT); 918 B43_NPHY_C2_CGAINI_CL2DETECT);
914 919
915 /* Set narrowband clip threshold */ 920 /* Set narrowband clip threshold */
916 b43_phy_set(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84); 921 b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
917 b43_phy_set(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84); 922 b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
918 923
919 if (!dev->phy.is_40mhz) { 924 if (!dev->phy.is_40mhz) {
920 /* Set dwell lengths */ 925 /* Set dwell lengths */
921 b43_phy_set(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B); 926 b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
922 b43_phy_set(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B); 927 b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
923 b43_phy_set(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009); 928 b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
924 b43_phy_set(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009); 929 b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
925 } 930 }
926 931
927 /* Set wideband clip 2 threshold */ 932 /* Set wideband clip 2 threshold */
@@ -943,7 +948,7 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
943 ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1); 948 ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
944 } 949 }
945 950
946 b43_phy_set(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); 951 b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
947 952
948 if (nphy->gain_boost) { 953 if (nphy->gain_boost) {
949 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ && 954 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
@@ -964,10 +969,10 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
964 code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT); 969 code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
965 970
966 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); 971 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
967 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 972 /* specs say about 2 loops, but wl does 4 */
968 (code << 8 | 0x7C)); 973 for (i = 0; i < 4; i++)
969 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 974 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
970 (code << 8 | 0x7C)); 975 (code << 8 | 0x7C));
971 976
972 b43_nphy_adjust_lna_gain_table(dev); 977 b43_nphy_adjust_lna_gain_table(dev);
973 978
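
The table accesses above go through an address/data register pair: the table offset is written once to B43_NPHY_TABLE_ADDR and the values are then streamed through B43_NPHY_TABLE_DATALO (the new comment notes that wl performs four such writes where the specs suggest about two). A minimal sketch of that indirect-access pattern, with placeholder register numbers and a logging stub instead of real MMIO:

    #include <stdint.h>
    #include <stdio.h>

    #define TABLE_ADDR   0x72    /* placeholder register numbers for the sketch */
    #define TABLE_DATALO 0x73

    static void phy_write(uint16_t reg, uint16_t val)   /* stand-in for the MMIO helper */
    {
        printf("phy[0x%03X] <- 0x%04X\n", reg, val);
    }

    /* Select a table offset once, then stream values through the data port;
     * the hardware is expected to advance to the next entry after each write. */
    static void table_write_bulk(uint16_t offset, const uint16_t *buf, int len)
    {
        int i;

        phy_write(TABLE_ADDR, offset);
        for (i = 0; i < len; i++)
            phy_write(TABLE_DATALO, buf[i]);
    }

    int main(void)
    {
        static const uint16_t data[4] = { 1, 2, 3, 4 };

        table_write_bulk(0x1D06, data, 4);
        return 0;
    }
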
@@ -985,19 +990,21 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
985 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); 990 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
986 991
987 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); 992 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
988 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 993 /* specs say about 2 loops, but wl does 4 */
989 (code << 8 | 0x74)); 994 for (i = 0; i < 4; i++)
990 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 995 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
991 (code << 8 | 0x74)); 996 (code << 8 | 0x74));
992 } 997 }
993 998
994 if (dev->phy.rev == 2) { 999 if (dev->phy.rev == 2) {
995 for (i = 0; i < 4; i++) { 1000 for (i = 0; i < 4; i++) {
996 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 1001 b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
997 (0x0400 * i) + 0x0020); 1002 (0x0400 * i) + 0x0020);
998 for (j = 0; j < 21; j++) 1003 for (j = 0; j < 21; j++) {
1004 tmp = j * (i < 2 ? 3 : 1);
999 b43_phy_write(dev, 1005 b43_phy_write(dev,
1000 B43_NPHY_TABLE_DATALO, 3 * j); 1006 B43_NPHY_TABLE_DATALO, tmp);
1007 }
1001 } 1008 }
1002 1009
1003 b43_nphy_set_rf_sequence(dev, 5, 1010 b43_nphy_set_rf_sequence(dev, 5,
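
In the rev-2 branch the rewritten inner loop now scales the value written to each table by core pair: tables 0 and 1 keep the old step of 3, tables 2 and 3 use a step of 1. A quick standalone print-out of the values this produces:

    #include <stdio.h>

    int main(void)
    {
        int i, j;

        for (i = 0; i < 4; i++) {
            int step = (i < 2) ? 3 : 1;      /* matches j * (i < 2 ? 3 : 1) above */
            for (j = 0; j < 21; j++)
                printf("%d%c", j * step, j == 20 ? '\n' : ' ');
        }
        return 0;
    }
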
@@ -1026,7 +1033,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
1026 u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 }; 1033 u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
1027 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 }; 1034 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
1028 1035
1029 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 1036 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
1030 b43_nphy_classifier(dev, 1, 0); 1037 b43_nphy_classifier(dev, 1, 0);
1031 else 1038 else
1032 b43_nphy_classifier(dev, 1, 1); 1039 b43_nphy_classifier(dev, 1, 1);
@@ -1565,19 +1572,20 @@ static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
1565 } 1572 }
1566} 1573}
1567 1574
	1575 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
1568static void b43_nphy_bphy_init(struct b43_wldev *dev) 1576static void b43_nphy_bphy_init(struct b43_wldev *dev)
1569{ 1577{
1570 unsigned int i; 1578 unsigned int i;
1571 u16 val; 1579 u16 val;
1572 1580
1573 val = 0x1E1F; 1581 val = 0x1E1F;
1574 for (i = 0; i < 14; i++) { 1582 for (i = 0; i < 16; i++) {
1575 b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val); 1583 b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
1576 val -= 0x202; 1584 val -= 0x202;
1577 } 1585 }
1578 val = 0x3E3F; 1586 val = 0x3E3F;
1579 for (i = 0; i < 16; i++) { 1587 for (i = 0; i < 16; i++) {
1580 b43_phy_write(dev, B43_PHY_N_BMODE(0x97 + i), val); 1588 b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
1581 val -= 0x202; 1589 val -= 0x202;
1582 } 1590 }
1583 b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); 1591 b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
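
The BPHYInit hunk widens the first fill loop from 14 to 16 registers and moves the second block's base from 0x97 to 0x98; both loops program a value that starts high and drops by 0x202 per register. A small sketch of the resulting offset/value ramp, with a placeholder name for the B-mode register block:

    #include <stdint.h>
    #include <stdio.h>

    static void fill_ramp(uint16_t base, uint16_t start, int count)
    {
        uint16_t val = start;
        int i;

        for (i = 0; i < count; i++) {
            printf("bmode[0x%02X] <- 0x%04X\n", base + i, val);
            val -= 0x202;               /* descending ramp, one step per register */
        }
    }

    int main(void)
    {
        fill_ramp(0x88, 0x1E1F, 16);    /* first block, now 16 entries */
        fill_ramp(0x98, 0x3E3F, 16);    /* second block, base moved to 0x98 */
        return 0;
    }
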
@@ -1837,6 +1845,14 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
1837 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); 1845 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
1838 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0); 1846 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
1839 save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1); 1847 save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
1848 } else if (dev->phy.rev == 2) {
1849 save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
1850 save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
1851 save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
1852 save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_RFCTL_CMD);
1853 save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
1854 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
1855 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
1840 } 1856 }
1841 1857
1842 b43_nphy_rssi_select(dev, 5, type); 1858 b43_nphy_rssi_select(dev, 5, type);
@@ -1880,6 +1896,14 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
1880 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]); 1896 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
1881 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]); 1897 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
1882 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]); 1898 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
1899 } else if (dev->phy.rev == 2) {
1900 b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]);
1901 b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]);
1902 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]);
1903 b43_phy_write(dev, B43_NPHY_RFCTL_CMD, save_regs_phy[3]);
1904 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, save_regs_phy[4]);
1905 b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, save_regs_phy[5]);
1906 b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, save_regs_phy[6]);
1883 } 1907 }
1884 1908
1885 return out; 1909 return out;
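
The two rev-2 hunks above add the same save/restore bookkeeping around the RSSI poll that rev 3+ already had: a fixed list of AFE/RF control registers is snapshotted before the measurement and written back afterwards in the same order. A generic standalone sketch of the pattern, with a placeholder register file and placeholder addresses:

    #include <stdint.h>

    #define N_SAVED 7

    static uint16_t regs[0x1000];                    /* stand-in PHY register file */

    static const uint16_t to_save[N_SAVED] = {       /* placeholder addresses */
        0x0A1, 0x0A2, 0x0A3, 0x0A4, 0x0A5, 0x0A6, 0x0A7
    };

    static void poll_with_overrides(void)
    {
        uint16_t saved[N_SAVED];
        int i;

        for (i = 0; i < N_SAVED; i++)                /* snapshot affected registers */
            saved[i] = regs[to_save[i]];

        /* ... apply RSSI overrides and run the measurement here ... */

        for (i = 0; i < N_SAVED; i++)                /* restore in the same order */
            regs[to_save[i]] = saved[i];
    }

    int main(void)
    {
        poll_with_overrides();
        return 0;
    }
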
@@ -2004,7 +2028,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
2004 } 2028 }
2005 2029
2006 b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]); 2030 b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]);
2007 b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[1]); 2031 b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]);
2008 2032
2009 switch (state[2]) { 2033 switch (state[2]) {
2010 case 1: 2034 case 1:
@@ -2295,7 +2319,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
2295{ 2319{
2296 int i, j; 2320 int i, j;
2297 /* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */ 2321 /* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
2298 u16 offset[] = { 0x186, 0x195, 0x2C5 }; 2322 static const u16 offset[] = { 0x186, 0x195, 0x2C5 };
2299 2323
2300 for (i = 0; i < 3; i++) 2324 for (i = 0; i < 3; i++)
2301 for (j = 0; j < 15; j++) 2325 for (j = 0; j < 15; j++)
@@ -3088,7 +3112,7 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
3088{ 3112{
3089 struct b43_phy *phy = &dev->phy; 3113 struct b43_phy *phy = &dev->phy;
3090 struct b43_phy_n *nphy = phy->n; 3114 struct b43_phy_n *nphy = phy->n;
3091 u16 buf[16]; 3115 /* u16 buf[16]; it's rev3+ */
3092 3116
3093 nphy->phyrxchain = mask; 3117 nphy->phyrxchain = mask;
3094 3118
@@ -3232,6 +3256,9 @@ int b43_phy_initn(struct b43_wldev *dev)
3232 3256
3233 b43_nphy_classifier(dev, 0, 0); 3257 b43_nphy_classifier(dev, 0, 0);
3234 b43_nphy_read_clip_detection(dev, clip); 3258 b43_nphy_read_clip_detection(dev, clip);
3259 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
3260 b43_nphy_bphy_init(dev);
3261
3235 tx_pwr_state = nphy->txpwrctrl; 3262 tx_pwr_state = nphy->txpwrctrl;
3236 /* TODO N PHY TX power control with argument 0 3263 /* TODO N PHY TX power control with argument 0
3237 (turning off power control) */ 3264 (turning off power control) */
@@ -3381,7 +3408,6 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
3381 enum nl80211_channel_type channel_type) 3408 enum nl80211_channel_type channel_type)
3382{ 3409{
3383 struct b43_phy *phy = &dev->phy; 3410 struct b43_phy *phy = &dev->phy;
3384 struct b43_phy_n *nphy = dev->phy.n;
3385 3411
3386 const struct b43_nphy_channeltab_entry_rev2 *tabent_r2; 3412 const struct b43_nphy_channeltab_entry_rev2 *tabent_r2;
3387 const struct b43_nphy_channeltab_entry_rev3 *tabent_r3; 3413 const struct b43_nphy_channeltab_entry_rev3 *tabent_r3;
@@ -3451,7 +3477,9 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
3451 3477
3452 memset(nphy, 0, sizeof(*nphy)); 3478 memset(nphy, 0, sizeof(*nphy));
3453 3479
3454 //TODO init struct b43_phy_n 3480 nphy->gain_boost = true; /* this way we follow wl, assume it is true */
3481 nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
3482 nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
3455} 3483}
3456 3484
3457static void b43_nphy_op_free(struct b43_wldev *dev) 3485static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -3524,8 +3552,6 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
3524static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, 3552static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
3525 bool blocked) 3553 bool blocked)
3526{ 3554{
3527 struct b43_phy_n *nphy = dev->phy.n;
3528
3529 if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED) 3555 if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
3530 b43err(dev->wl, "MAC not suspended\n"); 3556 b43err(dev->wl, "MAC not suspended\n");
3531 3557
diff --git a/drivers/net/wireless/b43/radio_2055.c b/drivers/net/wireless/b43/radio_2055.c
index 1b5316586cb..10910dc4184 100644
--- a/drivers/net/wireless/b43/radio_2055.c
+++ b/drivers/net/wireless/b43/radio_2055.c
@@ -244,7 +244,7 @@ static const struct b2055_inittab_entry b2055_inittab [] = {
244 [0xCB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 244 [0xCB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
245 [0xCC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 245 [0xCC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
246 [B2055_C1_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 246 [B2055_C1_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
247 [0xCE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 247 [0xCE] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
248 [0xCF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 248 [0xCF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
249 [0xD0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 249 [0xD0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
250 [0xD1] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, }, 250 [0xD1] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
@@ -256,7 +256,7 @@ static const struct b2055_inittab_entry b2055_inittab [] = {
256 [0xD7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 256 [0xD7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
257 [0xD8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 257 [0xD8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
258 [B2055_C2_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 258 [B2055_C2_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
259 [0xDA] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 259 [0xDA] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
260 [0xDB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 260 [0xDB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
261 [0xDC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 261 [0xDC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
262 [0xDD] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, }, 262 [0xDD] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
@@ -307,7 +307,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
307 RADIOREGS(0x71, 0x01, 0xEC, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 307 RADIOREGS(0x71, 0x01, 0xEC, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
308 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F, 308 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
309 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 309 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
310 PHYREGS(0xB407, 0xB007, 0xAC07, 0x1402, 0x1502, 0x1602), 310 PHYREGS(0x07B4, 0x07B0, 0x07AC, 0x0214, 0x0215, 0x0216),
311 }, 311 },
312 { .channel = 186, 312 { .channel = 186,
313 .freq = 4930, /* MHz */ 313 .freq = 4930, /* MHz */
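
Throughout this channel table the PHYREGS constants are corrected from byte-swapped 16-bit values to the intended ones (0xB407 becomes 0x07B4, 0x1402 becomes 0x0214, and so on for every entry below). A quick standalone check of that relationship, using a local helper rather than the kernel's swab16():

    #include <assert.h>
    #include <stdint.h>

    static uint16_t swap_bytes16(uint16_t v)         /* same effect as the kernel's swab16() */
    {
        return (uint16_t)((v << 8) | (v >> 8));
    }

    int main(void)
    {
        assert(swap_bytes16(0x07B4) == 0xB407);      /* first PHYREGS value in the hunk above */
        assert(swap_bytes16(0x0214) == 0x1402);      /* fourth value, same pattern */
        return 0;
    }
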
@@ -315,7 +315,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
315 RADIOREGS(0x71, 0x01, 0xED, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 315 RADIOREGS(0x71, 0x01, 0xED, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
316 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F, 316 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
317 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 317 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
318 PHYREGS(0xB807, 0xB407, 0xB007, 0x1302, 0x1402, 0x1502), 318 PHYREGS(0x07B8, 0x07B4, 0x07B0, 0x0213, 0x0214, 0x0215),
319 }, 319 },
320 { .channel = 188, 320 { .channel = 188,
321 .freq = 4940, /* MHz */ 321 .freq = 4940, /* MHz */
@@ -323,7 +323,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
323 RADIOREGS(0x71, 0x01, 0xEE, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 323 RADIOREGS(0x71, 0x01, 0xEE, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
324 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, 324 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
325 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 325 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
326 PHYREGS(0xBC07, 0xB807, 0xB407, 0x1202, 0x1302, 0x1402), 326 PHYREGS(0x07BC, 0x07B8, 0x07B4, 0x0212, 0x0213, 0x0214),
327 }, 327 },
328 { .channel = 190, 328 { .channel = 190,
329 .freq = 4950, /* MHz */ 329 .freq = 4950, /* MHz */
@@ -331,7 +331,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
331 RADIOREGS(0x71, 0x01, 0xEF, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 331 RADIOREGS(0x71, 0x01, 0xEF, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
332 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, 332 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
333 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 333 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
334 PHYREGS(0xC007, 0xBC07, 0xB807, 0x1102, 0x1202, 0x1302), 334 PHYREGS(0x07C0, 0x07BC, 0x07B8, 0x0211, 0x0212, 0x0213),
335 }, 335 },
336 { .channel = 192, 336 { .channel = 192,
337 .freq = 4960, /* MHz */ 337 .freq = 4960, /* MHz */
@@ -339,7 +339,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
339 RADIOREGS(0x71, 0x01, 0xF0, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 339 RADIOREGS(0x71, 0x01, 0xF0, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
340 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, 340 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
341 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 341 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
342 PHYREGS(0xC407, 0xC007, 0xBC07, 0x0F02, 0x1102, 0x1202), 342 PHYREGS(0x07C4, 0x07C0, 0x07BC, 0x020F, 0x0211, 0x0212),
343 }, 343 },
344 { .channel = 194, 344 { .channel = 194,
345 .freq = 4970, /* MHz */ 345 .freq = 4970, /* MHz */
@@ -347,7 +347,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
347 RADIOREGS(0x71, 0x01, 0xF1, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 347 RADIOREGS(0x71, 0x01, 0xF1, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
348 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, 348 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
349 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 349 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
350 PHYREGS(0xC807, 0xC407, 0xC007, 0x0E02, 0x0F02, 0x1102), 350 PHYREGS(0x07C8, 0x07C4, 0x07C0, 0x020E, 0x020F, 0x0211),
351 }, 351 },
352 { .channel = 196, 352 { .channel = 196,
353 .freq = 4980, /* MHz */ 353 .freq = 4980, /* MHz */
@@ -355,7 +355,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
355 RADIOREGS(0x71, 0x01, 0xF2, 0x0E, 0xFF, 0x01, 0x04, 0x0A, 355 RADIOREGS(0x71, 0x01, 0xF2, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
356 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, 356 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
357 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 357 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
358 PHYREGS(0xCC07, 0xC807, 0xC407, 0x0D02, 0x0E02, 0x0F02), 358 PHYREGS(0x07CC, 0x07C8, 0x07C4, 0x020D, 0x020E, 0x020F),
359 }, 359 },
360 { .channel = 198, 360 { .channel = 198,
361 .freq = 4990, /* MHz */ 361 .freq = 4990, /* MHz */
@@ -363,7 +363,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
363 RADIOREGS(0x71, 0x01, 0xF3, 0x0E, 0xFF, 0x01, 0x04, 0x0A, 363 RADIOREGS(0x71, 0x01, 0xF3, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
364 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, 364 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
365 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 365 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
366 PHYREGS(0xD007, 0xCC07, 0xC807, 0x0C02, 0x0D02, 0x0E02), 366 PHYREGS(0x07D0, 0x07CC, 0x07C8, 0x020C, 0x020D, 0x020E),
367 }, 367 },
368 { .channel = 200, 368 { .channel = 200,
369 .freq = 5000, /* MHz */ 369 .freq = 5000, /* MHz */
@@ -371,7 +371,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
371 RADIOREGS(0x71, 0x01, 0xF4, 0x0E, 0xFF, 0x01, 0x04, 0x0A, 371 RADIOREGS(0x71, 0x01, 0xF4, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
372 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, 372 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
373 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 373 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
374 PHYREGS(0xD407, 0xD007, 0xCC07, 0x0B02, 0x0C02, 0x0D02), 374 PHYREGS(0x07D4, 0x07D0, 0x07CC, 0x020B, 0x020C, 0x020D),
375 }, 375 },
376 { .channel = 202, 376 { .channel = 202,
377 .freq = 5010, /* MHz */ 377 .freq = 5010, /* MHz */
@@ -379,7 +379,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
379 RADIOREGS(0x71, 0x01, 0xF5, 0x0E, 0xFF, 0x01, 0x04, 0x0A, 379 RADIOREGS(0x71, 0x01, 0xF5, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
380 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, 380 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
381 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 381 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
382 PHYREGS(0xD807, 0xD407, 0xD007, 0x0A02, 0x0B02, 0x0C02), 382 PHYREGS(0x07D8, 0x07D4, 0x07D0, 0x020A, 0x020B, 0x020C),
383 }, 383 },
384 { .channel = 204, 384 { .channel = 204,
385 .freq = 5020, /* MHz */ 385 .freq = 5020, /* MHz */
@@ -387,7 +387,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
387 RADIOREGS(0x71, 0x01, 0xF6, 0x0E, 0xF7, 0x01, 0x04, 0x0A, 387 RADIOREGS(0x71, 0x01, 0xF6, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
388 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, 388 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
389 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 389 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
390 PHYREGS(0xDC07, 0xD807, 0xD407, 0x0902, 0x0A02, 0x0B02), 390 PHYREGS(0x07DC, 0x07D8, 0x07D4, 0x0209, 0x020A, 0x020B),
391 }, 391 },
392 { .channel = 206, 392 { .channel = 206,
393 .freq = 5030, /* MHz */ 393 .freq = 5030, /* MHz */
@@ -395,7 +395,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
395 RADIOREGS(0x71, 0x01, 0xF7, 0x0E, 0xF7, 0x01, 0x04, 0x0A, 395 RADIOREGS(0x71, 0x01, 0xF7, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
396 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, 396 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
397 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 397 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
398 PHYREGS(0xE007, 0xDC07, 0xD807, 0x0802, 0x0902, 0x0A02), 398 PHYREGS(0x07E0, 0x07DC, 0x07D8, 0x0208, 0x0209, 0x020A),
399 }, 399 },
400 { .channel = 208, 400 { .channel = 208,
401 .freq = 5040, /* MHz */ 401 .freq = 5040, /* MHz */
@@ -403,7 +403,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
403 RADIOREGS(0x71, 0x01, 0xF8, 0x0D, 0xEF, 0x01, 0x04, 0x0A, 403 RADIOREGS(0x71, 0x01, 0xF8, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
404 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, 404 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
405 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 405 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
406 PHYREGS(0xE407, 0xE007, 0xDC07, 0x0702, 0x0802, 0x0902), 406 PHYREGS(0x07E4, 0x07E0, 0x07DC, 0x0207, 0x0208, 0x0209),
407 }, 407 },
408 { .channel = 210, 408 { .channel = 210,
409 .freq = 5050, /* MHz */ 409 .freq = 5050, /* MHz */
@@ -411,7 +411,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
411 RADIOREGS(0x71, 0x01, 0xF9, 0x0D, 0xEF, 0x01, 0x04, 0x0A, 411 RADIOREGS(0x71, 0x01, 0xF9, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
412 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, 412 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
413 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 413 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
414 PHYREGS(0xE807, 0xE407, 0xE007, 0x0602, 0x0702, 0x0802), 414 PHYREGS(0x07E8, 0x07E4, 0x07E0, 0x0206, 0x0207, 0x0208),
415 }, 415 },
416 { .channel = 212, 416 { .channel = 212,
417 .freq = 5060, /* MHz */ 417 .freq = 5060, /* MHz */
@@ -419,7 +419,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
419 RADIOREGS(0x71, 0x01, 0xFA, 0x0D, 0xE6, 0x01, 0x04, 0x0A, 419 RADIOREGS(0x71, 0x01, 0xFA, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
420 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F, 420 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
421 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E), 421 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
422 PHYREGS(0xEC07, 0xE807, 0xE407, 0x0502, 0x0602, 0x0702), 422 PHYREGS(0x07EC, 0x07E8, 0x07E4, 0x0205, 0x0206, 0x0207),
423 }, 423 },
424 { .channel = 214, 424 { .channel = 214,
425 .freq = 5070, /* MHz */ 425 .freq = 5070, /* MHz */
@@ -427,7 +427,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
427 RADIOREGS(0x71, 0x01, 0xFB, 0x0D, 0xE6, 0x01, 0x04, 0x0A, 427 RADIOREGS(0x71, 0x01, 0xFB, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
428 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F, 428 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
429 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E), 429 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
430 PHYREGS(0xF007, 0xEC07, 0xE807, 0x0402, 0x0502, 0x0602), 430 PHYREGS(0x07F0, 0x07EC, 0x07E8, 0x0204, 0x0205, 0x0206),
431 }, 431 },
432 { .channel = 216, 432 { .channel = 216,
433 .freq = 5080, /* MHz */ 433 .freq = 5080, /* MHz */
@@ -435,7 +435,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
435 RADIOREGS(0x71, 0x01, 0xFC, 0x0D, 0xDE, 0x01, 0x04, 0x0A, 435 RADIOREGS(0x71, 0x01, 0xFC, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
436 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F, 436 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
437 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D), 437 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
438 PHYREGS(0xF407, 0xF007, 0xEC07, 0x0302, 0x0402, 0x0502), 438 PHYREGS(0x07F4, 0x07F0, 0x07EC, 0x0203, 0x0204, 0x0205),
439 }, 439 },
440 { .channel = 218, 440 { .channel = 218,
441 .freq = 5090, /* MHz */ 441 .freq = 5090, /* MHz */
@@ -443,7 +443,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
443 RADIOREGS(0x71, 0x01, 0xFD, 0x0D, 0xDE, 0x01, 0x04, 0x0A, 443 RADIOREGS(0x71, 0x01, 0xFD, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
444 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F, 444 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
445 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D), 445 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
446 PHYREGS(0xF807, 0xF407, 0xF007, 0x0202, 0x0302, 0x0402), 446 PHYREGS(0x07F8, 0x07F4, 0x07F0, 0x0202, 0x0203, 0x0204),
447 }, 447 },
448 { .channel = 220, 448 { .channel = 220,
449 .freq = 5100, /* MHz */ 449 .freq = 5100, /* MHz */
@@ -451,7 +451,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
451 RADIOREGS(0x71, 0x01, 0xFE, 0x0C, 0xD6, 0x01, 0x04, 0x0A, 451 RADIOREGS(0x71, 0x01, 0xFE, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
452 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F, 452 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
453 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D), 453 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
454 PHYREGS(0xFC07, 0xF807, 0xF407, 0x0102, 0x0202, 0x0302), 454 PHYREGS(0x07FC, 0x07F8, 0x07F4, 0x0201, 0x0202, 0x0203),
455 }, 455 },
456 { .channel = 222, 456 { .channel = 222,
457 .freq = 5110, /* MHz */ 457 .freq = 5110, /* MHz */
@@ -459,7 +459,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
459 RADIOREGS(0x71, 0x01, 0xFF, 0x0C, 0xD6, 0x01, 0x04, 0x0A, 459 RADIOREGS(0x71, 0x01, 0xFF, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
460 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F, 460 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
461 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D), 461 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
462 PHYREGS(0x0008, 0xFC07, 0xF807, 0x0002, 0x0102, 0x0202), 462 PHYREGS(0x0800, 0x07FC, 0x07F8, 0x0200, 0x0201, 0x0202),
463 }, 463 },
464 { .channel = 224, 464 { .channel = 224,
465 .freq = 5120, /* MHz */ 465 .freq = 5120, /* MHz */
@@ -467,7 +467,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
467 RADIOREGS(0x71, 0x02, 0x00, 0x0C, 0xCE, 0x01, 0x04, 0x0A, 467 RADIOREGS(0x71, 0x02, 0x00, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
468 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F, 468 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
469 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C), 469 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
470 PHYREGS(0x0408, 0x0008, 0xFC07, 0xFF01, 0x0002, 0x0102), 470 PHYREGS(0x0804, 0x0800, 0x07FC, 0x01FF, 0x0200, 0x0201),
471 }, 471 },
472 { .channel = 226, 472 { .channel = 226,
473 .freq = 5130, /* MHz */ 473 .freq = 5130, /* MHz */
@@ -475,7 +475,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
475 RADIOREGS(0x71, 0x02, 0x01, 0x0C, 0xCE, 0x01, 0x04, 0x0A, 475 RADIOREGS(0x71, 0x02, 0x01, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
476 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F, 476 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
477 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C), 477 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
478 PHYREGS(0x0808, 0x0408, 0x0008, 0xFE01, 0xFF01, 0x0002), 478 PHYREGS(0x0808, 0x0804, 0x0800, 0x01FE, 0x01FF, 0x0200),
479 }, 479 },
480 { .channel = 228, 480 { .channel = 228,
481 .freq = 5140, /* MHz */ 481 .freq = 5140, /* MHz */
@@ -483,7 +483,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
483 RADIOREGS(0x71, 0x02, 0x02, 0x0C, 0xC6, 0x01, 0x04, 0x0A, 483 RADIOREGS(0x71, 0x02, 0x02, 0x0C, 0xC6, 0x01, 0x04, 0x0A,
484 0x00, 0x8D, 0x99, 0x99, 0xDD, 0x00, 0x0C, 0x0E, 484 0x00, 0x8D, 0x99, 0x99, 0xDD, 0x00, 0x0C, 0x0E,
485 0x8B, 0xDD, 0x00, 0x0C, 0x0E, 0x8B), 485 0x8B, 0xDD, 0x00, 0x0C, 0x0E, 0x8B),
486 PHYREGS(0x0C08, 0x0808, 0x0408, 0xFD01, 0xFE01, 0xFF01), 486 PHYREGS(0x080C, 0x0808, 0x0804, 0x01FD, 0x01FE, 0x01FF),
487 }, 487 },
488 { .channel = 32, 488 { .channel = 32,
489 .freq = 5160, /* MHz */ 489 .freq = 5160, /* MHz */
@@ -491,7 +491,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
491 RADIOREGS(0x71, 0x02, 0x04, 0x0B, 0xBE, 0x01, 0x04, 0x0A, 491 RADIOREGS(0x71, 0x02, 0x04, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
492 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D, 492 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
493 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A), 493 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
494 PHYREGS(0x1408, 0x1008, 0x0C08, 0xFB01, 0xFC01, 0xFD01), 494 PHYREGS(0x0814, 0x0810, 0x080C, 0x01FB, 0x01FC, 0x01FD),
495 }, 495 },
496 { .channel = 34, 496 { .channel = 34,
497 .freq = 5170, /* MHz */ 497 .freq = 5170, /* MHz */
@@ -499,7 +499,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
499 RADIOREGS(0x71, 0x02, 0x05, 0x0B, 0xBE, 0x01, 0x04, 0x0A, 499 RADIOREGS(0x71, 0x02, 0x05, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
500 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D, 500 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
501 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A), 501 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
502 PHYREGS(0x1808, 0x1408, 0x1008, 0xFA01, 0xFB01, 0xFC01), 502 PHYREGS(0x0818, 0x0814, 0x0810, 0x01FA, 0x01FB, 0x01FC),
503 }, 503 },
504 { .channel = 36, 504 { .channel = 36,
505 .freq = 5180, /* MHz */ 505 .freq = 5180, /* MHz */
@@ -507,7 +507,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
507 RADIOREGS(0x71, 0x02, 0x06, 0x0B, 0xB6, 0x01, 0x04, 0x0A, 507 RADIOREGS(0x71, 0x02, 0x06, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
508 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C, 508 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
509 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89), 509 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
510 PHYREGS(0x1C08, 0x1808, 0x1408, 0xF901, 0xFA01, 0xFB01), 510 PHYREGS(0x081C, 0x0818, 0x0814, 0x01F9, 0x01FA, 0x01FB),
511 }, 511 },
512 { .channel = 38, 512 { .channel = 38,
513 .freq = 5190, /* MHz */ 513 .freq = 5190, /* MHz */
@@ -515,7 +515,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
515 RADIOREGS(0x71, 0x02, 0x07, 0x0B, 0xB6, 0x01, 0x04, 0x0A, 515 RADIOREGS(0x71, 0x02, 0x07, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
516 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C, 516 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
517 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89), 517 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
518 PHYREGS(0x2008, 0x1C08, 0x1808, 0xF801, 0xF901, 0xFA01), 518 PHYREGS(0x0820, 0x081C, 0x0818, 0x01F8, 0x01F9, 0x01FA),
519 }, 519 },
520 { .channel = 40, 520 { .channel = 40,
521 .freq = 5200, /* MHz */ 521 .freq = 5200, /* MHz */
@@ -523,7 +523,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
523 RADIOREGS(0x71, 0x02, 0x08, 0x0B, 0xAF, 0x01, 0x04, 0x0A, 523 RADIOREGS(0x71, 0x02, 0x08, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
524 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B, 524 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
525 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89), 525 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
526 PHYREGS(0x2408, 0x2008, 0x1C08, 0xF701, 0xF801, 0xF901), 526 PHYREGS(0x0824, 0x0820, 0x081C, 0x01F7, 0x01F8, 0x01F9),
527 }, 527 },
528 { .channel = 42, 528 { .channel = 42,
529 .freq = 5210, /* MHz */ 529 .freq = 5210, /* MHz */
@@ -531,7 +531,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
531 RADIOREGS(0x71, 0x02, 0x09, 0x0B, 0xAF, 0x01, 0x04, 0x0A, 531 RADIOREGS(0x71, 0x02, 0x09, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
532 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B, 532 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
533 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89), 533 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
534 PHYREGS(0x2808, 0x2408, 0x2008, 0xF601, 0xF701, 0xF801), 534 PHYREGS(0x0828, 0x0824, 0x0820, 0x01F6, 0x01F7, 0x01F8),
535 }, 535 },
536 { .channel = 44, 536 { .channel = 44,
537 .freq = 5220, /* MHz */ 537 .freq = 5220, /* MHz */
@@ -539,7 +539,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
539 RADIOREGS(0x71, 0x02, 0x0A, 0x0A, 0xA7, 0x01, 0x04, 0x0A, 539 RADIOREGS(0x71, 0x02, 0x0A, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
540 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A, 540 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
541 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88), 541 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
542 PHYREGS(0x2C08, 0x2808, 0x2408, 0xF501, 0xF601, 0xF701), 542 PHYREGS(0x082C, 0x0828, 0x0824, 0x01F5, 0x01F6, 0x01F7),
543 }, 543 },
544 { .channel = 46, 544 { .channel = 46,
545 .freq = 5230, /* MHz */ 545 .freq = 5230, /* MHz */
@@ -547,7 +547,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
547 RADIOREGS(0x71, 0x02, 0x0B, 0x0A, 0xA7, 0x01, 0x04, 0x0A, 547 RADIOREGS(0x71, 0x02, 0x0B, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
548 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A, 548 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
549 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88), 549 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
550 PHYREGS(0x3008, 0x2C08, 0x2808, 0xF401, 0xF501, 0xF601), 550 PHYREGS(0x0830, 0x082C, 0x0828, 0x01F4, 0x01F5, 0x01F6),
551 }, 551 },
552 { .channel = 48, 552 { .channel = 48,
553 .freq = 5240, /* MHz */ 553 .freq = 5240, /* MHz */
@@ -555,7 +555,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
555 RADIOREGS(0x71, 0x02, 0x0C, 0x0A, 0xA0, 0x01, 0x04, 0x0A, 555 RADIOREGS(0x71, 0x02, 0x0C, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
556 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A, 556 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
557 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87), 557 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
558 PHYREGS(0x3408, 0x3008, 0x2C08, 0xF301, 0xF401, 0xF501), 558 PHYREGS(0x0834, 0x0830, 0x082C, 0x01F3, 0x01F4, 0x01F5),
559 }, 559 },
560 { .channel = 50, 560 { .channel = 50,
561 .freq = 5250, /* MHz */ 561 .freq = 5250, /* MHz */
@@ -563,7 +563,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
563 RADIOREGS(0x71, 0x02, 0x0D, 0x0A, 0xA0, 0x01, 0x04, 0x0A, 563 RADIOREGS(0x71, 0x02, 0x0D, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
564 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A, 564 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
565 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87), 565 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
566 PHYREGS(0x3808, 0x3408, 0x3008, 0xF201, 0xF301, 0xF401), 566 PHYREGS(0x0838, 0x0834, 0x0830, 0x01F2, 0x01F3, 0x01F4),
567 }, 567 },
568 { .channel = 52, 568 { .channel = 52,
569 .freq = 5260, /* MHz */ 569 .freq = 5260, /* MHz */
@@ -571,7 +571,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
571 RADIOREGS(0x71, 0x02, 0x0E, 0x0A, 0x98, 0x01, 0x04, 0x0A, 571 RADIOREGS(0x71, 0x02, 0x0E, 0x0A, 0x98, 0x01, 0x04, 0x0A,
572 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09, 572 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
573 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87), 573 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
574 PHYREGS(0x3C08, 0x3808, 0x3408, 0xF101, 0xF201, 0xF301), 574 PHYREGS(0x083C, 0x0838, 0x0834, 0x01F1, 0x01F2, 0x01F3),
575 }, 575 },
576 { .channel = 54, 576 { .channel = 54,
577 .freq = 5270, /* MHz */ 577 .freq = 5270, /* MHz */
@@ -579,7 +579,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
579 RADIOREGS(0x71, 0x02, 0x0F, 0x0A, 0x98, 0x01, 0x04, 0x0A, 579 RADIOREGS(0x71, 0x02, 0x0F, 0x0A, 0x98, 0x01, 0x04, 0x0A,
580 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09, 580 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
581 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87), 581 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
582 PHYREGS(0x4008, 0x3C08, 0x3808, 0xF001, 0xF101, 0xF201), 582 PHYREGS(0x0840, 0x083C, 0x0838, 0x01F0, 0x01F1, 0x01F2),
583 }, 583 },
584 { .channel = 56, 584 { .channel = 56,
585 .freq = 5280, /* MHz */ 585 .freq = 5280, /* MHz */
@@ -587,7 +587,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
587 RADIOREGS(0x71, 0x02, 0x10, 0x09, 0x91, 0x01, 0x04, 0x0A, 587 RADIOREGS(0x71, 0x02, 0x10, 0x09, 0x91, 0x01, 0x04, 0x0A,
588 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08, 588 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
589 0x86, 0x99, 0x00, 0x08, 0x08, 0x86), 589 0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
590 PHYREGS(0x4408, 0x4008, 0x3C08, 0xF001, 0xF001, 0xF101), 590 PHYREGS(0x0844, 0x0840, 0x083C, 0x01F0, 0x01F0, 0x01F1),
591 }, 591 },
592 { .channel = 58, 592 { .channel = 58,
593 .freq = 5290, /* MHz */ 593 .freq = 5290, /* MHz */
@@ -595,7 +595,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
595 RADIOREGS(0x71, 0x02, 0x11, 0x09, 0x91, 0x01, 0x04, 0x0A, 595 RADIOREGS(0x71, 0x02, 0x11, 0x09, 0x91, 0x01, 0x04, 0x0A,
596 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08, 596 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
597 0x86, 0x99, 0x00, 0x08, 0x08, 0x86), 597 0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
598 PHYREGS(0x4808, 0x4408, 0x4008, 0xEF01, 0xF001, 0xF001), 598 PHYREGS(0x0848, 0x0844, 0x0840, 0x01EF, 0x01F0, 0x01F0),
599 }, 599 },
600 { .channel = 60, 600 { .channel = 60,
601 .freq = 5300, /* MHz */ 601 .freq = 5300, /* MHz */
@@ -603,7 +603,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
603 RADIOREGS(0x71, 0x02, 0x12, 0x09, 0x8A, 0x01, 0x04, 0x0A, 603 RADIOREGS(0x71, 0x02, 0x12, 0x09, 0x8A, 0x01, 0x04, 0x0A,
604 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07, 604 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
605 0x85, 0x99, 0x00, 0x08, 0x07, 0x85), 605 0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
606 PHYREGS(0x4C08, 0x4808, 0x4408, 0xEE01, 0xEF01, 0xF001), 606 PHYREGS(0x084C, 0x0848, 0x0844, 0x01EE, 0x01EF, 0x01F0),
607 }, 607 },
608 { .channel = 62, 608 { .channel = 62,
609 .freq = 5310, /* MHz */ 609 .freq = 5310, /* MHz */
@@ -611,7 +611,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
611 RADIOREGS(0x71, 0x02, 0x13, 0x09, 0x8A, 0x01, 0x04, 0x0A, 611 RADIOREGS(0x71, 0x02, 0x13, 0x09, 0x8A, 0x01, 0x04, 0x0A,
612 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07, 612 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
613 0x85, 0x99, 0x00, 0x08, 0x07, 0x85), 613 0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
614 PHYREGS(0x5008, 0x4C08, 0x4808, 0xED01, 0xEE01, 0xEF01), 614 PHYREGS(0x0850, 0x084C, 0x0848, 0x01ED, 0x01EE, 0x01EF),
615 }, 615 },
616 { .channel = 64, 616 { .channel = 64,
617 .freq = 5320, /* MHz */ 617 .freq = 5320, /* MHz */
@@ -619,7 +619,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
619 RADIOREGS(0x71, 0x02, 0x14, 0x09, 0x83, 0x01, 0x04, 0x0A, 619 RADIOREGS(0x71, 0x02, 0x14, 0x09, 0x83, 0x01, 0x04, 0x0A,
620 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07, 620 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
621 0x84, 0x88, 0x00, 0x07, 0x07, 0x84), 621 0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
622 PHYREGS(0x5408, 0x5008, 0x4C08, 0xEC01, 0xED01, 0xEE01), 622 PHYREGS(0x0854, 0x0850, 0x084C, 0x01EC, 0x01ED, 0x01EE),
623 }, 623 },
624 { .channel = 66, 624 { .channel = 66,
625 .freq = 5330, /* MHz */ 625 .freq = 5330, /* MHz */
@@ -627,7 +627,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
627 RADIOREGS(0x71, 0x02, 0x15, 0x09, 0x83, 0x01, 0x04, 0x0A, 627 RADIOREGS(0x71, 0x02, 0x15, 0x09, 0x83, 0x01, 0x04, 0x0A,
628 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07, 628 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
629 0x84, 0x88, 0x00, 0x07, 0x07, 0x84), 629 0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
630 PHYREGS(0x5808, 0x5408, 0x5008, 0xEB01, 0xEC01, 0xED01), 630 PHYREGS(0x0858, 0x0854, 0x0850, 0x01EB, 0x01EC, 0x01ED),
631 }, 631 },
632 { .channel = 68, 632 { .channel = 68,
633 .freq = 5340, /* MHz */ 633 .freq = 5340, /* MHz */
@@ -635,7 +635,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
635 RADIOREGS(0x71, 0x02, 0x16, 0x08, 0x7C, 0x01, 0x04, 0x0A, 635 RADIOREGS(0x71, 0x02, 0x16, 0x08, 0x7C, 0x01, 0x04, 0x0A,
636 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06, 636 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
637 0x84, 0x88, 0x00, 0x07, 0x06, 0x84), 637 0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
638 PHYREGS(0x5C08, 0x5808, 0x5408, 0xEA01, 0xEB01, 0xEC01), 638 PHYREGS(0x085C, 0x0858, 0x0854, 0x01EA, 0x01EB, 0x01EC),
639 }, 639 },
640 { .channel = 70, 640 { .channel = 70,
641 .freq = 5350, /* MHz */ 641 .freq = 5350, /* MHz */
@@ -643,7 +643,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
643 RADIOREGS(0x71, 0x02, 0x17, 0x08, 0x7C, 0x01, 0x04, 0x0A, 643 RADIOREGS(0x71, 0x02, 0x17, 0x08, 0x7C, 0x01, 0x04, 0x0A,
644 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06, 644 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
645 0x84, 0x88, 0x00, 0x07, 0x06, 0x84), 645 0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
646 PHYREGS(0x6008, 0x5C08, 0x5808, 0xE901, 0xEA01, 0xEB01), 646 PHYREGS(0x0860, 0x085C, 0x0858, 0x01E9, 0x01EA, 0x01EB),
647 }, 647 },
648 { .channel = 72, 648 { .channel = 72,
649 .freq = 5360, /* MHz */ 649 .freq = 5360, /* MHz */
@@ -651,7 +651,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
651 RADIOREGS(0x71, 0x02, 0x18, 0x08, 0x75, 0x01, 0x04, 0x0A, 651 RADIOREGS(0x71, 0x02, 0x18, 0x08, 0x75, 0x01, 0x04, 0x0A,
652 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05, 652 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
653 0x83, 0x77, 0x00, 0x06, 0x05, 0x83), 653 0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
654 PHYREGS(0x6408, 0x6008, 0x5C08, 0xE801, 0xE901, 0xEA01), 654 PHYREGS(0x0864, 0x0860, 0x085C, 0x01E8, 0x01E9, 0x01EA),
655 }, 655 },
656 { .channel = 74, 656 { .channel = 74,
657 .freq = 5370, /* MHz */ 657 .freq = 5370, /* MHz */
@@ -659,7 +659,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
659 RADIOREGS(0x71, 0x02, 0x19, 0x08, 0x75, 0x01, 0x04, 0x0A, 659 RADIOREGS(0x71, 0x02, 0x19, 0x08, 0x75, 0x01, 0x04, 0x0A,
660 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05, 660 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
661 0x83, 0x77, 0x00, 0x06, 0x05, 0x83), 661 0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
662 PHYREGS(0x6808, 0x6408, 0x6008, 0xE701, 0xE801, 0xE901), 662 PHYREGS(0x0868, 0x0864, 0x0860, 0x01E7, 0x01E8, 0x01E9),
663 }, 663 },
664 { .channel = 76, 664 { .channel = 76,
665 .freq = 5380, /* MHz */ 665 .freq = 5380, /* MHz */
@@ -667,7 +667,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
667 RADIOREGS(0x71, 0x02, 0x1A, 0x08, 0x6E, 0x01, 0x04, 0x0A, 667 RADIOREGS(0x71, 0x02, 0x1A, 0x08, 0x6E, 0x01, 0x04, 0x0A,
668 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04, 668 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
669 0x82, 0x77, 0x00, 0x06, 0x04, 0x82), 669 0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
670 PHYREGS(0x6C08, 0x6808, 0x6408, 0xE601, 0xE701, 0xE801), 670 PHYREGS(0x086C, 0x0868, 0x0864, 0x01E6, 0x01E7, 0x01E8),
671 }, 671 },
672 { .channel = 78, 672 { .channel = 78,
673 .freq = 5390, /* MHz */ 673 .freq = 5390, /* MHz */
@@ -675,7 +675,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
675 RADIOREGS(0x71, 0x02, 0x1B, 0x08, 0x6E, 0x01, 0x04, 0x0A, 675 RADIOREGS(0x71, 0x02, 0x1B, 0x08, 0x6E, 0x01, 0x04, 0x0A,
676 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04, 676 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
677 0x82, 0x77, 0x00, 0x06, 0x04, 0x82), 677 0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
678 PHYREGS(0x7008, 0x6C08, 0x6808, 0xE501, 0xE601, 0xE701), 678 PHYREGS(0x0870, 0x086C, 0x0868, 0x01E5, 0x01E6, 0x01E7),
679 }, 679 },
680 { .channel = 80, 680 { .channel = 80,
681 .freq = 5400, /* MHz */ 681 .freq = 5400, /* MHz */
@@ -683,7 +683,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
683 RADIOREGS(0x71, 0x02, 0x1C, 0x07, 0x67, 0x01, 0x04, 0x0A, 683 RADIOREGS(0x71, 0x02, 0x1C, 0x07, 0x67, 0x01, 0x04, 0x0A,
684 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04, 684 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
685 0x81, 0x66, 0x00, 0x05, 0x04, 0x81), 685 0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
686 PHYREGS(0x7408, 0x7008, 0x6C08, 0xE501, 0xE501, 0xE601), 686 PHYREGS(0x0874, 0x0870, 0x086C, 0x01E5, 0x01E5, 0x01E6),
687 }, 687 },
688 { .channel = 82, 688 { .channel = 82,
689 .freq = 5410, /* MHz */ 689 .freq = 5410, /* MHz */
@@ -691,7 +691,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
691 RADIOREGS(0x71, 0x02, 0x1D, 0x07, 0x67, 0x01, 0x04, 0x0A, 691 RADIOREGS(0x71, 0x02, 0x1D, 0x07, 0x67, 0x01, 0x04, 0x0A,
692 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04, 692 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
693 0x81, 0x66, 0x00, 0x05, 0x04, 0x81), 693 0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
694 PHYREGS(0x7808, 0x7408, 0x7008, 0xE401, 0xE501, 0xE501), 694 PHYREGS(0x0878, 0x0874, 0x0870, 0x01E4, 0x01E5, 0x01E5),
695 }, 695 },
696 { .channel = 84, 696 { .channel = 84,
697 .freq = 5420, /* MHz */ 697 .freq = 5420, /* MHz */
@@ -699,7 +699,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
699 RADIOREGS(0x71, 0x02, 0x1E, 0x07, 0x61, 0x01, 0x04, 0x0A, 699 RADIOREGS(0x71, 0x02, 0x1E, 0x07, 0x61, 0x01, 0x04, 0x0A,
700 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03, 700 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
701 0x80, 0x66, 0x00, 0x05, 0x03, 0x80), 701 0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
702 PHYREGS(0x7C08, 0x7808, 0x7408, 0xE301, 0xE401, 0xE501), 702 PHYREGS(0x087C, 0x0878, 0x0874, 0x01E3, 0x01E4, 0x01E5),
703 }, 703 },
704 { .channel = 86, 704 { .channel = 86,
705 .freq = 5430, /* MHz */ 705 .freq = 5430, /* MHz */
@@ -707,7 +707,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
707 RADIOREGS(0x71, 0x02, 0x1F, 0x07, 0x61, 0x01, 0x04, 0x0A, 707 RADIOREGS(0x71, 0x02, 0x1F, 0x07, 0x61, 0x01, 0x04, 0x0A,
708 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03, 708 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
709 0x80, 0x66, 0x00, 0x05, 0x03, 0x80), 709 0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
710 PHYREGS(0x8008, 0x7C08, 0x7808, 0xE201, 0xE301, 0xE401), 710 PHYREGS(0x0880, 0x087C, 0x0878, 0x01E2, 0x01E3, 0x01E4),
711 }, 711 },
712 { .channel = 88, 712 { .channel = 88,
713 .freq = 5440, /* MHz */ 713 .freq = 5440, /* MHz */
@@ -715,7 +715,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
715 RADIOREGS(0x71, 0x02, 0x20, 0x07, 0x5A, 0x01, 0x04, 0x0A, 715 RADIOREGS(0x71, 0x02, 0x20, 0x07, 0x5A, 0x01, 0x04, 0x0A,
716 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02, 716 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
717 0x80, 0x55, 0x00, 0x04, 0x02, 0x80), 717 0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
718 PHYREGS(0x8408, 0x8008, 0x7C08, 0xE101, 0xE201, 0xE301), 718 PHYREGS(0x0884, 0x0880, 0x087C, 0x01E1, 0x01E2, 0x01E3),
719 }, 719 },
720 { .channel = 90, 720 { .channel = 90,
721 .freq = 5450, /* MHz */ 721 .freq = 5450, /* MHz */
@@ -723,7 +723,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
723 RADIOREGS(0x71, 0x02, 0x21, 0x07, 0x5A, 0x01, 0x04, 0x0A, 723 RADIOREGS(0x71, 0x02, 0x21, 0x07, 0x5A, 0x01, 0x04, 0x0A,
724 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02, 724 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
725 0x80, 0x55, 0x00, 0x04, 0x02, 0x80), 725 0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
726 PHYREGS(0x8808, 0x8408, 0x8008, 0xE001, 0xE101, 0xE201), 726 PHYREGS(0x0888, 0x0884, 0x0880, 0x01E0, 0x01E1, 0x01E2),
727 }, 727 },
728 { .channel = 92, 728 { .channel = 92,
729 .freq = 5460, /* MHz */ 729 .freq = 5460, /* MHz */
@@ -731,7 +731,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
731 RADIOREGS(0x71, 0x02, 0x22, 0x06, 0x53, 0x01, 0x04, 0x0A, 731 RADIOREGS(0x71, 0x02, 0x22, 0x06, 0x53, 0x01, 0x04, 0x0A,
732 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01, 732 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
733 0x80, 0x55, 0x00, 0x04, 0x01, 0x80), 733 0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
734 PHYREGS(0x8C08, 0x8808, 0x8408, 0xDF01, 0xE001, 0xE101), 734 PHYREGS(0x088C, 0x0888, 0x0884, 0x01DF, 0x01E0, 0x01E1),
735 }, 735 },
736 { .channel = 94, 736 { .channel = 94,
737 .freq = 5470, /* MHz */ 737 .freq = 5470, /* MHz */
@@ -739,7 +739,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
739 RADIOREGS(0x71, 0x02, 0x23, 0x06, 0x53, 0x01, 0x04, 0x0A, 739 RADIOREGS(0x71, 0x02, 0x23, 0x06, 0x53, 0x01, 0x04, 0x0A,
740 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01, 740 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
741 0x80, 0x55, 0x00, 0x04, 0x01, 0x80), 741 0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
742 PHYREGS(0x9008, 0x8C08, 0x8808, 0xDE01, 0xDF01, 0xE001), 742 PHYREGS(0x0890, 0x088C, 0x0888, 0x01DE, 0x01DF, 0x01E0),
743 }, 743 },
744 { .channel = 96, 744 { .channel = 96,
745 .freq = 5480, /* MHz */ 745 .freq = 5480, /* MHz */
@@ -747,7 +747,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
747 RADIOREGS(0x71, 0x02, 0x24, 0x06, 0x4D, 0x01, 0x04, 0x0A, 747 RADIOREGS(0x71, 0x02, 0x24, 0x06, 0x4D, 0x01, 0x04, 0x0A,
748 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00, 748 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
749 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), 749 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
750 PHYREGS(0x9408, 0x9008, 0x8C08, 0xDD01, 0xDE01, 0xDF01), 750 PHYREGS(0x0894, 0x0890, 0x088C, 0x01DD, 0x01DE, 0x01DF),
751 }, 751 },
752 { .channel = 98, 752 { .channel = 98,
753 .freq = 5490, /* MHz */ 753 .freq = 5490, /* MHz */
@@ -755,7 +755,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
755 RADIOREGS(0x71, 0x02, 0x25, 0x06, 0x4D, 0x01, 0x04, 0x0A, 755 RADIOREGS(0x71, 0x02, 0x25, 0x06, 0x4D, 0x01, 0x04, 0x0A,
756 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00, 756 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
757 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), 757 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
758 PHYREGS(0x9808, 0x9408, 0x9008, 0xDD01, 0xDD01, 0xDE01), 758 PHYREGS(0x0898, 0x0894, 0x0890, 0x01DD, 0x01DD, 0x01DE),
759 }, 759 },
760 { .channel = 100, 760 { .channel = 100,
761 .freq = 5500, /* MHz */ 761 .freq = 5500, /* MHz */
@@ -763,7 +763,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
763 RADIOREGS(0x71, 0x02, 0x26, 0x06, 0x47, 0x01, 0x04, 0x0A, 763 RADIOREGS(0x71, 0x02, 0x26, 0x06, 0x47, 0x01, 0x04, 0x0A,
764 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00, 764 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
765 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), 765 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
766 PHYREGS(0x9C08, 0x9808, 0x9408, 0xDC01, 0xDD01, 0xDD01), 766 PHYREGS(0x089C, 0x0898, 0x0894, 0x01DC, 0x01DD, 0x01DD),
767 }, 767 },
768 { .channel = 102, 768 { .channel = 102,
769 .freq = 5510, /* MHz */ 769 .freq = 5510, /* MHz */
@@ -771,7 +771,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
771 RADIOREGS(0x71, 0x02, 0x27, 0x06, 0x47, 0x01, 0x04, 0x0A, 771 RADIOREGS(0x71, 0x02, 0x27, 0x06, 0x47, 0x01, 0x04, 0x0A,
772 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00, 772 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
773 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), 773 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
774 PHYREGS(0xA008, 0x9C08, 0x9808, 0xDB01, 0xDC01, 0xDD01), 774 PHYREGS(0x08A0, 0x089C, 0x0898, 0x01DB, 0x01DC, 0x01DD),
775 }, 775 },
776 { .channel = 104, 776 { .channel = 104,
777 .freq = 5520, /* MHz */ 777 .freq = 5520, /* MHz */
@@ -779,7 +779,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
779 RADIOREGS(0x71, 0x02, 0x28, 0x05, 0x40, 0x01, 0x04, 0x0A, 779 RADIOREGS(0x71, 0x02, 0x28, 0x05, 0x40, 0x01, 0x04, 0x0A,
780 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, 780 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
781 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), 781 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
782 PHYREGS(0xA408, 0xA008, 0x9C08, 0xDA01, 0xDB01, 0xDC01), 782 PHYREGS(0x08A4, 0x08A0, 0x089C, 0x01DA, 0x01DB, 0x01DC),
783 }, 783 },
784 { .channel = 106, 784 { .channel = 106,
785 .freq = 5530, /* MHz */ 785 .freq = 5530, /* MHz */
@@ -787,7 +787,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
787 RADIOREGS(0x71, 0x02, 0x29, 0x05, 0x40, 0x01, 0x04, 0x0A, 787 RADIOREGS(0x71, 0x02, 0x29, 0x05, 0x40, 0x01, 0x04, 0x0A,
788 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, 788 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
789 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), 789 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
790 PHYREGS(0xA808, 0xA408, 0xA008, 0xD901, 0xDA01, 0xDB01), 790 PHYREGS(0x08A8, 0x08A4, 0x08A0, 0x01D9, 0x01DA, 0x01DB),
791 }, 791 },
792 { .channel = 108, 792 { .channel = 108,
793 .freq = 5540, /* MHz */ 793 .freq = 5540, /* MHz */
@@ -795,7 +795,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
795 RADIOREGS(0x71, 0x02, 0x2A, 0x05, 0x3A, 0x01, 0x04, 0x0A, 795 RADIOREGS(0x71, 0x02, 0x2A, 0x05, 0x3A, 0x01, 0x04, 0x0A,
796 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, 796 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
797 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), 797 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
798 PHYREGS(0xAC08, 0xA808, 0xA408, 0xD801, 0xD901, 0xDA01), 798 PHYREGS(0x08AC, 0x08A8, 0x08A4, 0x01D8, 0x01D9, 0x01DA),
799 }, 799 },
800 { .channel = 110, 800 { .channel = 110,
801 .freq = 5550, /* MHz */ 801 .freq = 5550, /* MHz */
@@ -803,7 +803,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
803 RADIOREGS(0x71, 0x02, 0x2B, 0x05, 0x3A, 0x01, 0x04, 0x0A, 803 RADIOREGS(0x71, 0x02, 0x2B, 0x05, 0x3A, 0x01, 0x04, 0x0A,
804 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, 804 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
805 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), 805 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
806 PHYREGS(0xB008, 0xAC08, 0xA808, 0xD701, 0xD801, 0xD901), 806 PHYREGS(0x08B0, 0x08AC, 0x08A8, 0x01D7, 0x01D8, 0x01D9),
807 }, 807 },
808 { .channel = 112, 808 { .channel = 112,
809 .freq = 5560, /* MHz */ 809 .freq = 5560, /* MHz */
@@ -811,7 +811,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
811 RADIOREGS(0x71, 0x02, 0x2C, 0x05, 0x34, 0x01, 0x04, 0x0A, 811 RADIOREGS(0x71, 0x02, 0x2C, 0x05, 0x34, 0x01, 0x04, 0x0A,
812 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, 812 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
813 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), 813 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
814 PHYREGS(0xB408, 0xB008, 0xAC08, 0xD701, 0xD701, 0xD801), 814 PHYREGS(0x08B4, 0x08B0, 0x08AC, 0x01D7, 0x01D7, 0x01D8),
815 }, 815 },
816 { .channel = 114, 816 { .channel = 114,
817 .freq = 5570, /* MHz */ 817 .freq = 5570, /* MHz */
@@ -819,7 +819,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
819 RADIOREGS(0x71, 0x02, 0x2D, 0x05, 0x34, 0x01, 0x04, 0x0A, 819 RADIOREGS(0x71, 0x02, 0x2D, 0x05, 0x34, 0x01, 0x04, 0x0A,
820 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, 820 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
821 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), 821 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
822 PHYREGS(0xB808, 0xB408, 0xB008, 0xD601, 0xD701, 0xD701), 822 PHYREGS(0x08B8, 0x08B4, 0x08B0, 0x01D6, 0x01D7, 0x01D7),
823 }, 823 },
824 { .channel = 116, 824 { .channel = 116,
825 .freq = 5580, /* MHz */ 825 .freq = 5580, /* MHz */
@@ -827,7 +827,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
827 RADIOREGS(0x71, 0x02, 0x2E, 0x04, 0x2E, 0x01, 0x04, 0x0A, 827 RADIOREGS(0x71, 0x02, 0x2E, 0x04, 0x2E, 0x01, 0x04, 0x0A,
828 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, 828 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
829 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), 829 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
830 PHYREGS(0xBC08, 0xB808, 0xB408, 0xD501, 0xD601, 0xD701), 830 PHYREGS(0x08BC, 0x08B8, 0x08B4, 0x01D5, 0x01D6, 0x01D7),
831 }, 831 },
832 { .channel = 118, 832 { .channel = 118,
833 .freq = 5590, /* MHz */ 833 .freq = 5590, /* MHz */
@@ -835,7 +835,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
835 RADIOREGS(0x71, 0x02, 0x2F, 0x04, 0x2E, 0x01, 0x04, 0x0A, 835 RADIOREGS(0x71, 0x02, 0x2F, 0x04, 0x2E, 0x01, 0x04, 0x0A,
836 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, 836 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
837 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), 837 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
838 PHYREGS(0xC008, 0xBC08, 0xB808, 0xD401, 0xD501, 0xD601), 838 PHYREGS(0x08C0, 0x08BC, 0x08B8, 0x01D4, 0x01D5, 0x01D6),
839 }, 839 },
840 { .channel = 120, 840 { .channel = 120,
841 .freq = 5600, /* MHz */ 841 .freq = 5600, /* MHz */
@@ -843,7 +843,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
843 RADIOREGS(0x71, 0x02, 0x30, 0x04, 0x28, 0x01, 0x04, 0x0A, 843 RADIOREGS(0x71, 0x02, 0x30, 0x04, 0x28, 0x01, 0x04, 0x0A,
844 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00, 844 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
845 0x80, 0x11, 0x00, 0x01, 0x00, 0x80), 845 0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
846 PHYREGS(0xC408, 0xC008, 0xBC08, 0xD301, 0xD401, 0xD501), 846 PHYREGS(0x08C4, 0x08C0, 0x08BC, 0x01D3, 0x01D4, 0x01D5),
847 }, 847 },
848 { .channel = 122, 848 { .channel = 122,
849 .freq = 5610, /* MHz */ 849 .freq = 5610, /* MHz */
@@ -851,7 +851,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
851 RADIOREGS(0x71, 0x02, 0x31, 0x04, 0x28, 0x01, 0x04, 0x0A, 851 RADIOREGS(0x71, 0x02, 0x31, 0x04, 0x28, 0x01, 0x04, 0x0A,
852 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00, 852 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
853 0x80, 0x11, 0x00, 0x01, 0x00, 0x80), 853 0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
854 PHYREGS(0xC808, 0xC408, 0xC008, 0xD201, 0xD301, 0xD401), 854 PHYREGS(0x08C8, 0x08C4, 0x08C0, 0x01D2, 0x01D3, 0x01D4),
855 }, 855 },
856 { .channel = 124, 856 { .channel = 124,
857 .freq = 5620, /* MHz */ 857 .freq = 5620, /* MHz */
@@ -859,7 +859,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
859 RADIOREGS(0x71, 0x02, 0x32, 0x04, 0x21, 0x01, 0x04, 0x0A, 859 RADIOREGS(0x71, 0x02, 0x32, 0x04, 0x21, 0x01, 0x04, 0x0A,
860 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 860 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
861 0x80, 0x11, 0x00, 0x00, 0x00, 0x80), 861 0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
862 PHYREGS(0xCC08, 0xC808, 0xC408, 0xD201, 0xD201, 0xD301), 862 PHYREGS(0x08CC, 0x08C8, 0x08C4, 0x01D2, 0x01D2, 0x01D3),
863 }, 863 },
864 { .channel = 126, 864 { .channel = 126,
865 .freq = 5630, /* MHz */ 865 .freq = 5630, /* MHz */
@@ -867,7 +867,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
867 RADIOREGS(0x71, 0x02, 0x33, 0x04, 0x21, 0x01, 0x04, 0x0A, 867 RADIOREGS(0x71, 0x02, 0x33, 0x04, 0x21, 0x01, 0x04, 0x0A,
868 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 868 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
869 0x80, 0x11, 0x00, 0x00, 0x00, 0x80), 869 0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
870 PHYREGS(0xD008, 0xCC08, 0xC808, 0xD101, 0xD201, 0xD201), 870 PHYREGS(0x08D0, 0x08CC, 0x08C8, 0x01D1, 0x01D2, 0x01D2),
871 }, 871 },
872 { .channel = 128, 872 { .channel = 128,
873 .freq = 5640, /* MHz */ 873 .freq = 5640, /* MHz */
@@ -875,7 +875,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
875 RADIOREGS(0x71, 0x02, 0x34, 0x03, 0x1C, 0x01, 0x04, 0x0A, 875 RADIOREGS(0x71, 0x02, 0x34, 0x03, 0x1C, 0x01, 0x04, 0x0A,
876 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 876 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
877 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 877 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
878 PHYREGS(0xD408, 0xD008, 0xCC08, 0xD001, 0xD101, 0xD201), 878 PHYREGS(0x08D4, 0x08D0, 0x08CC, 0x01D0, 0x01D1, 0x01D2),
879 }, 879 },
880 { .channel = 130, 880 { .channel = 130,
881 .freq = 5650, /* MHz */ 881 .freq = 5650, /* MHz */
@@ -883,7 +883,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
883 RADIOREGS(0x71, 0x02, 0x35, 0x03, 0x1C, 0x01, 0x04, 0x0A, 883 RADIOREGS(0x71, 0x02, 0x35, 0x03, 0x1C, 0x01, 0x04, 0x0A,
884 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 884 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
885 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 885 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
886 PHYREGS(0xD808, 0xD408, 0xD008, 0xCF01, 0xD001, 0xD101), 886 PHYREGS(0x08D8, 0x08D4, 0x08D0, 0x01CF, 0x01D0, 0x01D1),
887 }, 887 },
888 { .channel = 132, 888 { .channel = 132,
889 .freq = 5660, /* MHz */ 889 .freq = 5660, /* MHz */
@@ -891,7 +891,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
891 RADIOREGS(0x71, 0x02, 0x36, 0x03, 0x16, 0x01, 0x04, 0x0A, 891 RADIOREGS(0x71, 0x02, 0x36, 0x03, 0x16, 0x01, 0x04, 0x0A,
892 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 892 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
893 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 893 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
894 PHYREGS(0xDC08, 0xD808, 0xD408, 0xCE01, 0xCF01, 0xD001), 894 PHYREGS(0x08DC, 0x08D8, 0x08D4, 0x01CE, 0x01CF, 0x01D0),
895 }, 895 },
896 { .channel = 134, 896 { .channel = 134,
897 .freq = 5670, /* MHz */ 897 .freq = 5670, /* MHz */
@@ -899,7 +899,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
899 RADIOREGS(0x71, 0x02, 0x37, 0x03, 0x16, 0x01, 0x04, 0x0A, 899 RADIOREGS(0x71, 0x02, 0x37, 0x03, 0x16, 0x01, 0x04, 0x0A,
900 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 900 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
901 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 901 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
902 PHYREGS(0xE008, 0xDC08, 0xD808, 0xCE01, 0xCE01, 0xCF01), 902 PHYREGS(0x08E0, 0x08DC, 0x08D8, 0x01CE, 0x01CE, 0x01CF),
903 }, 903 },
904 { .channel = 136, 904 { .channel = 136,
905 .freq = 5680, /* MHz */ 905 .freq = 5680, /* MHz */
@@ -907,7 +907,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
907 RADIOREGS(0x71, 0x02, 0x38, 0x03, 0x10, 0x01, 0x04, 0x0A, 907 RADIOREGS(0x71, 0x02, 0x38, 0x03, 0x10, 0x01, 0x04, 0x0A,
908 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 908 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
909 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 909 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
910 PHYREGS(0xE408, 0xE008, 0xDC08, 0xCD01, 0xCE01, 0xCE01), 910 PHYREGS(0x08E4, 0x08E0, 0x08DC, 0x01CD, 0x01CE, 0x01CE),
911 }, 911 },
912 { .channel = 138, 912 { .channel = 138,
913 .freq = 5690, /* MHz */ 913 .freq = 5690, /* MHz */
@@ -915,7 +915,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
915 RADIOREGS(0x71, 0x02, 0x39, 0x03, 0x10, 0x01, 0x04, 0x0A, 915 RADIOREGS(0x71, 0x02, 0x39, 0x03, 0x10, 0x01, 0x04, 0x0A,
916 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 916 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
917 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 917 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
918 PHYREGS(0xE808, 0xE408, 0xE008, 0xCC01, 0xCD01, 0xCE01), 918 PHYREGS(0x08E8, 0x08E4, 0x08E0, 0x01CC, 0x01CD, 0x01CE),
919 }, 919 },
920 { .channel = 140, 920 { .channel = 140,
921 .freq = 5700, /* MHz */ 921 .freq = 5700, /* MHz */
@@ -923,7 +923,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
923 RADIOREGS(0x71, 0x02, 0x3A, 0x02, 0x0A, 0x01, 0x04, 0x0A, 923 RADIOREGS(0x71, 0x02, 0x3A, 0x02, 0x0A, 0x01, 0x04, 0x0A,
924 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 924 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
925 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 925 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
926 PHYREGS(0xEC08, 0xE808, 0xE408, 0xCB01, 0xCC01, 0xCD01), 926 PHYREGS(0x08EC, 0x08E8, 0x08E4, 0x01CB, 0x01CC, 0x01CD),
927 }, 927 },
928 { .channel = 142, 928 { .channel = 142,
929 .freq = 5710, /* MHz */ 929 .freq = 5710, /* MHz */
@@ -931,7 +931,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
931 RADIOREGS(0x71, 0x02, 0x3B, 0x02, 0x0A, 0x01, 0x04, 0x0A, 931 RADIOREGS(0x71, 0x02, 0x3B, 0x02, 0x0A, 0x01, 0x04, 0x0A,
932 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 932 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
933 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 933 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
934 PHYREGS(0xF008, 0xEC08, 0xE808, 0xCA01, 0xCB01, 0xCC01), 934 PHYREGS(0x08F0, 0x08EC, 0x08E8, 0x01CA, 0x01CB, 0x01CC),
935 }, 935 },
936 { .channel = 144, 936 { .channel = 144,
937 .freq = 5720, /* MHz */ 937 .freq = 5720, /* MHz */
@@ -939,7 +939,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
939 RADIOREGS(0x71, 0x02, 0x3C, 0x02, 0x0A, 0x01, 0x04, 0x0A, 939 RADIOREGS(0x71, 0x02, 0x3C, 0x02, 0x0A, 0x01, 0x04, 0x0A,
940 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 940 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
941 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 941 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
942 PHYREGS(0xF408, 0xF008, 0xEC08, 0xC901, 0xCA01, 0xCB01), 942 PHYREGS(0x08F4, 0x08F0, 0x08EC, 0x01C9, 0x01CA, 0x01CB),
943 }, 943 },
944 { .channel = 145, 944 { .channel = 145,
945 .freq = 5725, /* MHz */ 945 .freq = 5725, /* MHz */
@@ -947,7 +947,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
947 RADIOREGS(0x72, 0x04, 0x79, 0x02, 0x03, 0x01, 0x03, 0x14, 947 RADIOREGS(0x72, 0x04, 0x79, 0x02, 0x03, 0x01, 0x03, 0x14,
948 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 948 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
949 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 949 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
950 PHYREGS(0xF608, 0xF208, 0xEE08, 0xC901, 0xCA01, 0xCB01), 950 PHYREGS(0x08F6, 0x08F2, 0x08EE, 0x01C9, 0x01CA, 0x01CB),
951 }, 951 },
952 { .channel = 146, 952 { .channel = 146,
953 .freq = 5730, /* MHz */ 953 .freq = 5730, /* MHz */
@@ -955,7 +955,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
955 RADIOREGS(0x71, 0x02, 0x3D, 0x02, 0x0A, 0x01, 0x04, 0x0A, 955 RADIOREGS(0x71, 0x02, 0x3D, 0x02, 0x0A, 0x01, 0x04, 0x0A,
956 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 956 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
957 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 957 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
958 PHYREGS(0xF808, 0xF408, 0xF008, 0xC901, 0xC901, 0xCA01), 958 PHYREGS(0x08F8, 0x08F4, 0x08F0, 0x01C9, 0x01C9, 0x01CA),
959 }, 959 },
960 { .channel = 147, 960 { .channel = 147,
961 .freq = 5735, /* MHz */ 961 .freq = 5735, /* MHz */
@@ -963,7 +963,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
963 RADIOREGS(0x72, 0x04, 0x7B, 0x02, 0x03, 0x01, 0x03, 0x14, 963 RADIOREGS(0x72, 0x04, 0x7B, 0x02, 0x03, 0x01, 0x03, 0x14,
964 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 964 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
965 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 965 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
966 PHYREGS(0xFA08, 0xF608, 0xF208, 0xC801, 0xC901, 0xCA01), 966 PHYREGS(0x08FA, 0x08F6, 0x08F2, 0x01C8, 0x01C9, 0x01CA),
967 }, 967 },
968 { .channel = 148, 968 { .channel = 148,
969 .freq = 5740, /* MHz */ 969 .freq = 5740, /* MHz */
@@ -971,7 +971,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
971 RADIOREGS(0x71, 0x02, 0x3E, 0x02, 0x0A, 0x01, 0x04, 0x0A, 971 RADIOREGS(0x71, 0x02, 0x3E, 0x02, 0x0A, 0x01, 0x04, 0x0A,
972 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 972 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
973 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 973 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
974 PHYREGS(0xFC08, 0xF808, 0xF408, 0xC801, 0xC901, 0xC901), 974 PHYREGS(0x08FC, 0x08F8, 0x08F4, 0x01C8, 0x01C9, 0x01C9),
975 }, 975 },
976 { .channel = 149, 976 { .channel = 149,
977 .freq = 5745, /* MHz */ 977 .freq = 5745, /* MHz */
@@ -979,7 +979,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
979 RADIOREGS(0x72, 0x04, 0x7D, 0x02, 0xFE, 0x00, 0x03, 0x14, 979 RADIOREGS(0x72, 0x04, 0x7D, 0x02, 0xFE, 0x00, 0x03, 0x14,
980 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 980 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
981 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 981 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
982 PHYREGS(0xFE08, 0xFA08, 0xF608, 0xC801, 0xC801, 0xC901), 982 PHYREGS(0x08FE, 0x08FA, 0x08F6, 0x01C8, 0x01C8, 0x01C9),
983 }, 983 },
984 { .channel = 150, 984 { .channel = 150,
985 .freq = 5750, /* MHz */ 985 .freq = 5750, /* MHz */
@@ -987,7 +987,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
987 RADIOREGS(0x71, 0x02, 0x3F, 0x02, 0x0A, 0x01, 0x04, 0x0A, 987 RADIOREGS(0x71, 0x02, 0x3F, 0x02, 0x0A, 0x01, 0x04, 0x0A,
988 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 988 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
989 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 989 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
990 PHYREGS(0x0009, 0xFC08, 0xF808, 0xC701, 0xC801, 0xC901), 990 PHYREGS(0x0900, 0x08FC, 0x08F8, 0x01C7, 0x01C8, 0x01C9),
991 }, 991 },
992 { .channel = 151, 992 { .channel = 151,
993 .freq = 5755, /* MHz */ 993 .freq = 5755, /* MHz */
@@ -995,7 +995,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
995 RADIOREGS(0x72, 0x04, 0x7F, 0x02, 0xFE, 0x00, 0x03, 0x14, 995 RADIOREGS(0x72, 0x04, 0x7F, 0x02, 0xFE, 0x00, 0x03, 0x14,
996 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 996 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
997 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 997 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
998 PHYREGS(0x0209, 0xFE08, 0xFA08, 0xC701, 0xC801, 0xC801), 998 PHYREGS(0x0902, 0x08FE, 0x08FA, 0x01C7, 0x01C8, 0x01C8),
999 }, 999 },
1000 { .channel = 152, 1000 { .channel = 152,
1001 .freq = 5760, /* MHz */ 1001 .freq = 5760, /* MHz */
@@ -1003,7 +1003,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1003 RADIOREGS(0x71, 0x02, 0x40, 0x02, 0x0A, 0x01, 0x04, 0x0A, 1003 RADIOREGS(0x71, 0x02, 0x40, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1004 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1004 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1005 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1005 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1006 PHYREGS(0x0409, 0x0009, 0xFC08, 0xC601, 0xC701, 0xC801), 1006 PHYREGS(0x0904, 0x0900, 0x08FC, 0x01C6, 0x01C7, 0x01C8),
1007 }, 1007 },
1008 { .channel = 153, 1008 { .channel = 153,
1009 .freq = 5765, /* MHz */ 1009 .freq = 5765, /* MHz */
@@ -1011,7 +1011,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1011 RADIOREGS(0x72, 0x04, 0x81, 0x02, 0xF8, 0x00, 0x03, 0x14, 1011 RADIOREGS(0x72, 0x04, 0x81, 0x02, 0xF8, 0x00, 0x03, 0x14,
1012 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1012 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1013 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1013 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1014 PHYREGS(0x0609, 0x0209, 0xFE08, 0xC601, 0xC701, 0xC801), 1014 PHYREGS(0x0906, 0x0902, 0x08FE, 0x01C6, 0x01C7, 0x01C8),
1015 }, 1015 },
1016 { .channel = 154, 1016 { .channel = 154,
1017 .freq = 5770, /* MHz */ 1017 .freq = 5770, /* MHz */
@@ -1019,7 +1019,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1019 RADIOREGS(0x71, 0x02, 0x41, 0x02, 0x0A, 0x01, 0x04, 0x0A, 1019 RADIOREGS(0x71, 0x02, 0x41, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1020 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1020 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1021 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1021 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1022 PHYREGS(0x0809, 0x0409, 0x0009, 0xC601, 0xC601, 0xC701), 1022 PHYREGS(0x0908, 0x0904, 0x0900, 0x01C6, 0x01C6, 0x01C7),
1023 }, 1023 },
1024 { .channel = 155, 1024 { .channel = 155,
1025 .freq = 5775, /* MHz */ 1025 .freq = 5775, /* MHz */
@@ -1027,7 +1027,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1027 RADIOREGS(0x72, 0x04, 0x83, 0x02, 0xF8, 0x00, 0x03, 0x14, 1027 RADIOREGS(0x72, 0x04, 0x83, 0x02, 0xF8, 0x00, 0x03, 0x14,
1028 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1028 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1029 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1029 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1030 PHYREGS(0x0A09, 0x0609, 0x0209, 0xC501, 0xC601, 0xC701), 1030 PHYREGS(0x090A, 0x0906, 0x0902, 0x01C5, 0x01C6, 0x01C7),
1031 }, 1031 },
1032 { .channel = 156, 1032 { .channel = 156,
1033 .freq = 5780, /* MHz */ 1033 .freq = 5780, /* MHz */
@@ -1035,7 +1035,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1035 RADIOREGS(0x71, 0x02, 0x42, 0x02, 0x0A, 0x01, 0x04, 0x0A, 1035 RADIOREGS(0x71, 0x02, 0x42, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1036 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1036 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1037 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1037 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1038 PHYREGS(0x0C09, 0x0809, 0x0409, 0xC501, 0xC601, 0xC601), 1038 PHYREGS(0x090C, 0x0908, 0x0904, 0x01C5, 0x01C6, 0x01C6),
1039 }, 1039 },
1040 { .channel = 157, 1040 { .channel = 157,
1041 .freq = 5785, /* MHz */ 1041 .freq = 5785, /* MHz */
@@ -1043,7 +1043,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1043 RADIOREGS(0x72, 0x04, 0x85, 0x02, 0xF2, 0x00, 0x03, 0x14, 1043 RADIOREGS(0x72, 0x04, 0x85, 0x02, 0xF2, 0x00, 0x03, 0x14,
1044 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1044 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1045 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1045 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1046 PHYREGS(0x0E09, 0x0A09, 0x0609, 0xC401, 0xC501, 0xC601), 1046 PHYREGS(0x090E, 0x090A, 0x0906, 0x01C4, 0x01C5, 0x01C6),
1047 }, 1047 },
1048 { .channel = 158, 1048 { .channel = 158,
1049 .freq = 5790, /* MHz */ 1049 .freq = 5790, /* MHz */
@@ -1051,7 +1051,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1051 RADIOREGS(0x71, 0x02, 0x43, 0x02, 0x0A, 0x01, 0x04, 0x0A, 1051 RADIOREGS(0x71, 0x02, 0x43, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1052 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1052 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1053 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1053 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1054 PHYREGS(0x1009, 0x0C09, 0x0809, 0xC401, 0xC501, 0xC601), 1054 PHYREGS(0x0910, 0x090C, 0x0908, 0x01C4, 0x01C5, 0x01C6),
1055 }, 1055 },
1056 { .channel = 159, 1056 { .channel = 159,
1057 .freq = 5795, /* MHz */ 1057 .freq = 5795, /* MHz */
@@ -1059,7 +1059,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1059 RADIOREGS(0x72, 0x04, 0x87, 0x02, 0xF2, 0x00, 0x03, 0x14, 1059 RADIOREGS(0x72, 0x04, 0x87, 0x02, 0xF2, 0x00, 0x03, 0x14,
1060 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1060 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1061 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1061 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1062 PHYREGS(0x1209, 0x0E09, 0x0A09, 0xC401, 0xC401, 0xC501), 1062 PHYREGS(0x0912, 0x090E, 0x090A, 0x01C4, 0x01C4, 0x01C5),
1063 }, 1063 },
1064 { .channel = 160, 1064 { .channel = 160,
1065 .freq = 5800, /* MHz */ 1065 .freq = 5800, /* MHz */
@@ -1067,7 +1067,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1067 RADIOREGS(0x71, 0x02, 0x44, 0x01, 0x0A, 0x01, 0x04, 0x0A, 1067 RADIOREGS(0x71, 0x02, 0x44, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1068 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1068 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1069 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1069 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1070 PHYREGS(0x1409, 0x1009, 0x0C09, 0xC301, 0xC401, 0xC501), 1070 PHYREGS(0x0914, 0x0910, 0x090C, 0x01C3, 0x01C4, 0x01C5),
1071 }, 1071 },
1072 { .channel = 161, 1072 { .channel = 161,
1073 .freq = 5805, /* MHz */ 1073 .freq = 5805, /* MHz */
@@ -1075,7 +1075,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1075 RADIOREGS(0x72, 0x04, 0x89, 0x01, 0xED, 0x00, 0x03, 0x14, 1075 RADIOREGS(0x72, 0x04, 0x89, 0x01, 0xED, 0x00, 0x03, 0x14,
1076 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1076 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1077 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1077 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1078 PHYREGS(0x1609, 0x1209, 0x0E09, 0xC301, 0xC401, 0xC401), 1078 PHYREGS(0x0916, 0x0912, 0x090E, 0x01C3, 0x01C4, 0x01C4),
1079 }, 1079 },
1080 { .channel = 162, 1080 { .channel = 162,
1081 .freq = 5810, /* MHz */ 1081 .freq = 5810, /* MHz */
@@ -1083,7 +1083,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1083 RADIOREGS(0x71, 0x02, 0x45, 0x01, 0x0A, 0x01, 0x04, 0x0A, 1083 RADIOREGS(0x71, 0x02, 0x45, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1084 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1084 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1085 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1085 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1086 PHYREGS(0x1809, 0x1409, 0x1009, 0xC201, 0xC301, 0xC401), 1086 PHYREGS(0x0918, 0x0914, 0x0910, 0x01C2, 0x01C3, 0x01C4),
1087 }, 1087 },
1088 { .channel = 163, 1088 { .channel = 163,
1089 .freq = 5815, /* MHz */ 1089 .freq = 5815, /* MHz */
@@ -1091,7 +1091,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1091 RADIOREGS(0x72, 0x04, 0x8B, 0x01, 0xED, 0x00, 0x03, 0x14, 1091 RADIOREGS(0x72, 0x04, 0x8B, 0x01, 0xED, 0x00, 0x03, 0x14,
1092 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1092 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1093 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1093 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1094 PHYREGS(0x1A09, 0x1609, 0x1209, 0xC201, 0xC301, 0xC401), 1094 PHYREGS(0x091A, 0x0916, 0x0912, 0x01C2, 0x01C3, 0x01C4),
1095 }, 1095 },
1096 { .channel = 164, 1096 { .channel = 164,
1097 .freq = 5820, /* MHz */ 1097 .freq = 5820, /* MHz */
@@ -1099,7 +1099,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1099 RADIOREGS(0x71, 0x02, 0x46, 0x01, 0x0A, 0x01, 0x04, 0x0A, 1099 RADIOREGS(0x71, 0x02, 0x46, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1100 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1100 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1101 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1101 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1102 PHYREGS(0x1C09, 0x1809, 0x1409, 0xC201, 0xC201, 0xC301), 1102 PHYREGS(0x091C, 0x0918, 0x0914, 0x01C2, 0x01C2, 0x01C3),
1103 }, 1103 },
1104 { .channel = 165, 1104 { .channel = 165,
1105 .freq = 5825, /* MHz */ 1105 .freq = 5825, /* MHz */
@@ -1107,7 +1107,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1107 RADIOREGS(0x72, 0x04, 0x8D, 0x01, 0xED, 0x00, 0x03, 0x14, 1107 RADIOREGS(0x72, 0x04, 0x8D, 0x01, 0xED, 0x00, 0x03, 0x14,
1108 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1108 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1109 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1109 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1110 PHYREGS(0x1E09, 0x1A09, 0x1609, 0xC101, 0xC201, 0xC301), 1110 PHYREGS(0x091E, 0x091A, 0x0916, 0x01C1, 0x01C2, 0x01C3),
1111 }, 1111 },
1112 { .channel = 166, 1112 { .channel = 166,
1113 .freq = 5830, /* MHz */ 1113 .freq = 5830, /* MHz */
@@ -1115,7 +1115,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1115 RADIOREGS(0x71, 0x02, 0x47, 0x01, 0x0A, 0x01, 0x04, 0x0A, 1115 RADIOREGS(0x71, 0x02, 0x47, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1116 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1116 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1117 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1117 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1118 PHYREGS(0x2009, 0x1C09, 0x1809, 0xC101, 0xC201, 0xC201), 1118 PHYREGS(0x0920, 0x091C, 0x0918, 0x01C1, 0x01C2, 0x01C2),
1119 }, 1119 },
1120 { .channel = 168, 1120 { .channel = 168,
1121 .freq = 5840, /* MHz */ 1121 .freq = 5840, /* MHz */
@@ -1123,7 +1123,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1123 RADIOREGS(0x71, 0x02, 0x48, 0x01, 0x0A, 0x01, 0x04, 0x0A, 1123 RADIOREGS(0x71, 0x02, 0x48, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1124 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1124 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1125 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1125 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1126 PHYREGS(0x2409, 0x2009, 0x1C09, 0xC001, 0xC101, 0xC201), 1126 PHYREGS(0x0924, 0x0920, 0x091C, 0x01C0, 0x01C1, 0x01C2),
1127 }, 1127 },
1128 { .channel = 170, 1128 { .channel = 170,
1129 .freq = 5850, /* MHz */ 1129 .freq = 5850, /* MHz */
@@ -1131,7 +1131,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1131 RADIOREGS(0x71, 0x02, 0x49, 0x01, 0xE0, 0x00, 0x04, 0x0A, 1131 RADIOREGS(0x71, 0x02, 0x49, 0x01, 0xE0, 0x00, 0x04, 0x0A,
1132 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1132 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1133 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1133 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1134 PHYREGS(0x2809, 0x2409, 0x2009, 0xBF01, 0xC001, 0xC101), 1134 PHYREGS(0x0928, 0x0924, 0x0920, 0x01BF, 0x01C0, 0x01C1),
1135 }, 1135 },
1136 { .channel = 172, 1136 { .channel = 172,
1137 .freq = 5860, /* MHz */ 1137 .freq = 5860, /* MHz */
@@ -1139,7 +1139,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1139 RADIOREGS(0x71, 0x02, 0x4A, 0x01, 0xDE, 0x00, 0x04, 0x0A, 1139 RADIOREGS(0x71, 0x02, 0x4A, 0x01, 0xDE, 0x00, 0x04, 0x0A,
1140 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1140 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1141 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1141 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1142 PHYREGS(0x2C09, 0x2809, 0x2409, 0xBF01, 0xBF01, 0xC001), 1142 PHYREGS(0x092C, 0x0928, 0x0924, 0x01BF, 0x01BF, 0x01C0),
1143 }, 1143 },
1144 { .channel = 174, 1144 { .channel = 174,
1145 .freq = 5870, /* MHz */ 1145 .freq = 5870, /* MHz */
@@ -1147,7 +1147,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1147 RADIOREGS(0x71, 0x02, 0x4B, 0x00, 0xDB, 0x00, 0x04, 0x0A, 1147 RADIOREGS(0x71, 0x02, 0x4B, 0x00, 0xDB, 0x00, 0x04, 0x0A,
1148 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1148 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1149 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1149 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1150 PHYREGS(0x3009, 0x2C09, 0x2809, 0xBE01, 0xBF01, 0xBF01), 1150 PHYREGS(0x0930, 0x092C, 0x0928, 0x01BE, 0x01BF, 0x01BF),
1151 }, 1151 },
1152 { .channel = 176, 1152 { .channel = 176,
1153 .freq = 5880, /* MHz */ 1153 .freq = 5880, /* MHz */
@@ -1155,7 +1155,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1155 RADIOREGS(0x71, 0x02, 0x4C, 0x00, 0xD8, 0x00, 0x04, 0x0A, 1155 RADIOREGS(0x71, 0x02, 0x4C, 0x00, 0xD8, 0x00, 0x04, 0x0A,
1156 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1156 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1157 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1157 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1158 PHYREGS(0x3409, 0x3009, 0x2C09, 0xBD01, 0xBE01, 0xBF01), 1158 PHYREGS(0x0934, 0x0930, 0x092C, 0x01BD, 0x01BE, 0x01BF),
1159 }, 1159 },
1160 { .channel = 178, 1160 { .channel = 178,
1161 .freq = 5890, /* MHz */ 1161 .freq = 5890, /* MHz */
@@ -1163,7 +1163,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1163 RADIOREGS(0x71, 0x02, 0x4D, 0x00, 0xD6, 0x00, 0x04, 0x0A, 1163 RADIOREGS(0x71, 0x02, 0x4D, 0x00, 0xD6, 0x00, 0x04, 0x0A,
1164 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1164 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1165 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1165 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1166 PHYREGS(0x3809, 0x3409, 0x3009, 0xBC01, 0xBD01, 0xBE01), 1166 PHYREGS(0x0938, 0x0934, 0x0930, 0x01BC, 0x01BD, 0x01BE),
1167 }, 1167 },
1168 { .channel = 180, 1168 { .channel = 180,
1169 .freq = 5900, /* MHz */ 1169 .freq = 5900, /* MHz */
@@ -1171,7 +1171,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1171 RADIOREGS(0x71, 0x02, 0x4E, 0x00, 0xD3, 0x00, 0x04, 0x0A, 1171 RADIOREGS(0x71, 0x02, 0x4E, 0x00, 0xD3, 0x00, 0x04, 0x0A,
1172 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1172 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1173 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1173 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1174 PHYREGS(0x3C09, 0x3809, 0x3409, 0xBC01, 0xBC01, 0xBD01), 1174 PHYREGS(0x093C, 0x0938, 0x0934, 0x01BC, 0x01BC, 0x01BD),
1175 }, 1175 },
1176 { .channel = 182, 1176 { .channel = 182,
1177 .freq = 5910, /* MHz */ 1177 .freq = 5910, /* MHz */
@@ -1179,7 +1179,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1179 RADIOREGS(0x71, 0x02, 0x4F, 0x00, 0xD6, 0x00, 0x04, 0x0A, 1179 RADIOREGS(0x71, 0x02, 0x4F, 0x00, 0xD6, 0x00, 0x04, 0x0A,
1180 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1180 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1181 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1181 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1182 PHYREGS(0x4009, 0x3C09, 0x3809, 0xBB01, 0xBC01, 0xBC01), 1182 PHYREGS(0x0940, 0x093C, 0x0938, 0x01BB, 0x01BC, 0x01BC),
1183 }, 1183 },
1184 { .channel = 1, 1184 { .channel = 1,
1185 .freq = 2412, /* MHz */ 1185 .freq = 2412, /* MHz */
@@ -1187,7 +1187,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1187 RADIOREGS(0x73, 0x09, 0x6C, 0x0F, 0x00, 0x01, 0x07, 0x15, 1187 RADIOREGS(0x73, 0x09, 0x6C, 0x0F, 0x00, 0x01, 0x07, 0x15,
1188 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0D, 0x0C, 1188 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0D, 0x0C,
1189 0x80, 0xFF, 0x88, 0x0D, 0x0C, 0x80), 1189 0x80, 0xFF, 0x88, 0x0D, 0x0C, 0x80),
1190 PHYREGS(0xC903, 0xC503, 0xC103, 0x3A04, 0x3F04, 0x4304), 1190 PHYREGS(0x03C9, 0x03C5, 0x03C1, 0x043A, 0x043F, 0x0443),
1191 }, 1191 },
1192 { .channel = 2, 1192 { .channel = 2,
1193 .freq = 2417, /* MHz */ 1193 .freq = 2417, /* MHz */
@@ -1195,7 +1195,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1195 RADIOREGS(0x73, 0x09, 0x71, 0x0F, 0x00, 0x01, 0x07, 0x15, 1195 RADIOREGS(0x73, 0x09, 0x71, 0x0F, 0x00, 0x01, 0x07, 0x15,
1196 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0B, 1196 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0B,
1197 0x80, 0xFF, 0x88, 0x0C, 0x0B, 0x80), 1197 0x80, 0xFF, 0x88, 0x0C, 0x0B, 0x80),
1198 PHYREGS(0xCB03, 0xC703, 0xC303, 0x3804, 0x3D04, 0x4104), 1198 PHYREGS(0x03CB, 0x03C7, 0x03C3, 0x0438, 0x043D, 0x0441),
1199 }, 1199 },
1200 { .channel = 3, 1200 { .channel = 3,
1201 .freq = 2422, /* MHz */ 1201 .freq = 2422, /* MHz */
@@ -1203,7 +1203,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1203 RADIOREGS(0x73, 0x09, 0x76, 0x0F, 0x00, 0x01, 0x07, 0x15, 1203 RADIOREGS(0x73, 0x09, 0x76, 0x0F, 0x00, 0x01, 0x07, 0x15,
1204 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A, 1204 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
1205 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80), 1205 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
1206 PHYREGS(0xCD03, 0xC903, 0xC503, 0x3604, 0x3A04, 0x3F04), 1206 PHYREGS(0x03CD, 0x03C9, 0x03C5, 0x0436, 0x043A, 0x043F),
1207 }, 1207 },
1208 { .channel = 4, 1208 { .channel = 4,
1209 .freq = 2427, /* MHz */ 1209 .freq = 2427, /* MHz */
@@ -1211,7 +1211,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1211 RADIOREGS(0x73, 0x09, 0x7B, 0x0F, 0x00, 0x01, 0x07, 0x15, 1211 RADIOREGS(0x73, 0x09, 0x7B, 0x0F, 0x00, 0x01, 0x07, 0x15,
1212 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A, 1212 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
1213 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80), 1213 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
1214 PHYREGS(0xCF03, 0xCB03, 0xC703, 0x3404, 0x3804, 0x3D04), 1214 PHYREGS(0x03CF, 0x03CB, 0x03C7, 0x0434, 0x0438, 0x043D),
1215 }, 1215 },
1216 { .channel = 5, 1216 { .channel = 5,
1217 .freq = 2432, /* MHz */ 1217 .freq = 2432, /* MHz */
@@ -1219,7 +1219,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1219 RADIOREGS(0x73, 0x09, 0x80, 0x0F, 0x00, 0x01, 0x07, 0x15, 1219 RADIOREGS(0x73, 0x09, 0x80, 0x0F, 0x00, 0x01, 0x07, 0x15,
1220 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x09, 1220 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x09,
1221 0x80, 0xFF, 0x88, 0x0C, 0x09, 0x80), 1221 0x80, 0xFF, 0x88, 0x0C, 0x09, 0x80),
1222 PHYREGS(0xD103, 0xCD03, 0xC903, 0x3104, 0x3604, 0x3A04), 1222 PHYREGS(0x03D1, 0x03CD, 0x03C9, 0x0431, 0x0436, 0x043A),
1223 }, 1223 },
1224 { .channel = 6, 1224 { .channel = 6,
1225 .freq = 2437, /* MHz */ 1225 .freq = 2437, /* MHz */
@@ -1227,7 +1227,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1227 RADIOREGS(0x73, 0x09, 0x85, 0x0F, 0x00, 0x01, 0x07, 0x15, 1227 RADIOREGS(0x73, 0x09, 0x85, 0x0F, 0x00, 0x01, 0x07, 0x15,
1228 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0B, 0x08, 1228 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0B, 0x08,
1229 0x80, 0xFF, 0x88, 0x0B, 0x08, 0x80), 1229 0x80, 0xFF, 0x88, 0x0B, 0x08, 0x80),
1230 PHYREGS(0xD303, 0xCF03, 0xCB03, 0x2F04, 0x3404, 0x3804), 1230 PHYREGS(0x03D3, 0x03CF, 0x03CB, 0x042F, 0x0434, 0x0438),
1231 }, 1231 },
1232 { .channel = 7, 1232 { .channel = 7,
1233 .freq = 2442, /* MHz */ 1233 .freq = 2442, /* MHz */
@@ -1235,7 +1235,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1235 RADIOREGS(0x73, 0x09, 0x8A, 0x0F, 0x00, 0x01, 0x07, 0x15, 1235 RADIOREGS(0x73, 0x09, 0x8A, 0x0F, 0x00, 0x01, 0x07, 0x15,
1236 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x07, 1236 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x07,
1237 0x80, 0xFF, 0x88, 0x0A, 0x07, 0x80), 1237 0x80, 0xFF, 0x88, 0x0A, 0x07, 0x80),
1238 PHYREGS(0xD503, 0xD103, 0xCD03, 0x2D04, 0x3104, 0x3604), 1238 PHYREGS(0x03D5, 0x03D1, 0x03CD, 0x042D, 0x0431, 0x0436),
1239 }, 1239 },
1240 { .channel = 8, 1240 { .channel = 8,
1241 .freq = 2447, /* MHz */ 1241 .freq = 2447, /* MHz */
@@ -1243,7 +1243,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1243 RADIOREGS(0x73, 0x09, 0x8F, 0x0F, 0x00, 0x01, 0x07, 0x15, 1243 RADIOREGS(0x73, 0x09, 0x8F, 0x0F, 0x00, 0x01, 0x07, 0x15,
1244 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x06, 1244 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x06,
1245 0x80, 0xFF, 0x88, 0x0A, 0x06, 0x80), 1245 0x80, 0xFF, 0x88, 0x0A, 0x06, 0x80),
1246 PHYREGS(0xD703, 0xD303, 0xCF03, 0x2B04, 0x2F04, 0x3404), 1246 PHYREGS(0x03D7, 0x03D3, 0x03CF, 0x042B, 0x042F, 0x0434),
1247 }, 1247 },
1248 { .channel = 9, 1248 { .channel = 9,
1249 .freq = 2452, /* MHz */ 1249 .freq = 2452, /* MHz */
@@ -1251,7 +1251,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1251 RADIOREGS(0x73, 0x09, 0x94, 0x0F, 0x00, 0x01, 0x07, 0x15, 1251 RADIOREGS(0x73, 0x09, 0x94, 0x0F, 0x00, 0x01, 0x07, 0x15,
1252 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x09, 0x06, 1252 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x09, 0x06,
1253 0x80, 0xFF, 0x88, 0x09, 0x06, 0x80), 1253 0x80, 0xFF, 0x88, 0x09, 0x06, 0x80),
1254 PHYREGS(0xD903, 0xD503, 0xD103, 0x2904, 0x2D04, 0x3104), 1254 PHYREGS(0x03D9, 0x03D5, 0x03D1, 0x0429, 0x042D, 0x0431),
1255 }, 1255 },
1256 { .channel = 10, 1256 { .channel = 10,
1257 .freq = 2457, /* MHz */ 1257 .freq = 2457, /* MHz */
@@ -1259,7 +1259,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1259 RADIOREGS(0x73, 0x09, 0x99, 0x0F, 0x00, 0x01, 0x07, 0x15, 1259 RADIOREGS(0x73, 0x09, 0x99, 0x0F, 0x00, 0x01, 0x07, 0x15,
1260 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x05, 1260 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x05,
1261 0x80, 0xFF, 0x88, 0x08, 0x05, 0x80), 1261 0x80, 0xFF, 0x88, 0x08, 0x05, 0x80),
1262 PHYREGS(0xDB03, 0xD703, 0xD303, 0x2704, 0x2B04, 0x2F04), 1262 PHYREGS(0x03DB, 0x03D7, 0x03D3, 0x0427, 0x042B, 0x042F),
1263 }, 1263 },
1264 { .channel = 11, 1264 { .channel = 11,
1265 .freq = 2462, /* MHz */ 1265 .freq = 2462, /* MHz */
@@ -1267,7 +1267,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1267 RADIOREGS(0x73, 0x09, 0x9E, 0x0F, 0x00, 0x01, 0x07, 0x15, 1267 RADIOREGS(0x73, 0x09, 0x9E, 0x0F, 0x00, 0x01, 0x07, 0x15,
1268 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x04, 1268 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x04,
1269 0x80, 0xFF, 0x88, 0x08, 0x04, 0x80), 1269 0x80, 0xFF, 0x88, 0x08, 0x04, 0x80),
1270 PHYREGS(0xDD03, 0xD903, 0xD503, 0x2404, 0x2904, 0x2D04), 1270 PHYREGS(0x03DD, 0x03D9, 0x03D5, 0x0424, 0x0429, 0x042D),
1271 }, 1271 },
1272 { .channel = 12, 1272 { .channel = 12,
1273 .freq = 2467, /* MHz */ 1273 .freq = 2467, /* MHz */
@@ -1275,7 +1275,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1275 RADIOREGS(0x73, 0x09, 0xA3, 0x0F, 0x00, 0x01, 0x07, 0x15, 1275 RADIOREGS(0x73, 0x09, 0xA3, 0x0F, 0x00, 0x01, 0x07, 0x15,
1276 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x03, 1276 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x03,
1277 0x80, 0xFF, 0x88, 0x08, 0x03, 0x80), 1277 0x80, 0xFF, 0x88, 0x08, 0x03, 0x80),
1278 PHYREGS(0xDF03, 0xDB03, 0xD703, 0x2204, 0x2704, 0x2B04), 1278 PHYREGS(0x03DF, 0x03DB, 0x03D7, 0x0422, 0x0427, 0x042B),
1279 }, 1279 },
1280 { .channel = 13, 1280 { .channel = 13,
1281 .freq = 2472, /* MHz */ 1281 .freq = 2472, /* MHz */
@@ -1283,7 +1283,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1283 RADIOREGS(0x73, 0x09, 0xA8, 0x0F, 0x00, 0x01, 0x07, 0x15, 1283 RADIOREGS(0x73, 0x09, 0xA8, 0x0F, 0x00, 0x01, 0x07, 0x15,
1284 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x03, 1284 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x03,
1285 0x80, 0xFF, 0x88, 0x07, 0x03, 0x80), 1285 0x80, 0xFF, 0x88, 0x07, 0x03, 0x80),
1286 PHYREGS(0xE103, 0xDD03, 0xD903, 0x2004, 0x2404, 0x2904), 1286 PHYREGS(0x03E1, 0x03DD, 0x03D9, 0x0420, 0x0424, 0x0429),
1287 }, 1287 },
1288 { .channel = 14, 1288 { .channel = 14,
1289 .freq = 2484, /* MHz */ 1289 .freq = 2484, /* MHz */
@@ -1291,7 +1291,7 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
1291 RADIOREGS(0x73, 0x09, 0xB4, 0x0F, 0xFF, 0x01, 0x07, 0x15, 1291 RADIOREGS(0x73, 0x09, 0xB4, 0x0F, 0xFF, 0x01, 0x07, 0x15,
1292 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x01, 1292 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x01,
1293 0x80, 0xFF, 0x88, 0x07, 0x01, 0x80), 1293 0x80, 0xFF, 0x88, 0x07, 0x01, 0x80),
1294 PHYREGS(0xE603, 0xE203, 0xDE03, 0x1B04, 0x1F04, 0x2404), 1294 PHYREGS(0x03E6, 0x03E2, 0x03DE, 0x041B, 0x041F, 0x0424),
1295 }, 1295 },
1296}; 1296};
1297 1297
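Every PHYREGS() change in the hunks above applies the same transformation: the two bytes of each 16-bit constant are swapped, e.g. 0xBC08 becomes 0x08BC and 0xD501 becomes 0x01D5. The standalone sketch below only illustrates that byte swap; swap16() is a hypothetical helper, not part of the patch or of the b43 driver.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: swap the two bytes of a 16-bit value, mirroring the
 * edit applied to every PHYREGS() constant in the hunks above. */
static uint16_t swap16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	/* Prints "0xBC08 -> 0x08BC", matching the before/after columns above. */
	printf("0x%04X -> 0x%04X\n", 0xBC08, (unsigned)swap16(0xBC08));
	return 0;
}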
@@ -1299,7 +1299,7 @@ void b2055_upload_inittab(struct b43_wldev *dev,
1299 bool ghz5, bool ignore_uploadflag) 1299 bool ghz5, bool ignore_uploadflag)
1300{ 1300{
1301 const struct b2055_inittab_entry *e; 1301 const struct b2055_inittab_entry *e;
1302 unsigned int i; 1302 unsigned int i, writes = 0;
1303 u16 value; 1303 u16 value;
1304 1304
1305 for (i = 0; i < ARRAY_SIZE(b2055_inittab); i++) { 1305 for (i = 0; i < ARRAY_SIZE(b2055_inittab); i++) {
@@ -1312,6 +1312,8 @@ void b2055_upload_inittab(struct b43_wldev *dev,
1312 else 1312 else
1313 value = e->ghz2; 1313 value = e->ghz2;
1314 b43_radio_write16(dev, i, value); 1314 b43_radio_write16(dev, i, value);
1315 if (++writes % 4 == 0)
1316 b43_read32(dev, B43_MMIO_MACCTL); /* flush */
1315 } 1317 }
1316 } 1318 }
1317} 1319}
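The hunk above changes b2055_upload_inittab() so that after every fourth radio register write a read of B43_MMIO_MACCTL is posted, flushing the queued writes (see the "/* flush */" comment). The sketch below shows the same every-Nth-write flush pattern in isolation; reg_write() and reg_read() are hypothetical stand-ins, not the b43 MMIO accessors.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register accessors standing in for the driver's MMIO helpers. */
static void reg_write(unsigned int reg, uint16_t val)
{
	printf("write reg 0x%03X = 0x%04X\n", reg, (unsigned)val);
}

static uint16_t reg_read(unsigned int reg)
{
	printf("read  reg 0x%03X (flush)\n", reg);
	return 0;
}

/* Upload a table of values, flushing after every fourth write, as in the
 * patched loop above. */
static void upload(const uint16_t *tab, unsigned int len)
{
	unsigned int i, writes = 0;

	for (i = 0; i < len; i++) {
		reg_write(i, tab[i]);
		if (++writes % 4 == 0)
			reg_read(0);	/* dummy read acts as the flush */
	}
}

int main(void)
{
	static const uint16_t tab[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };

	upload(tab, sizeof(tab) / sizeof(tab[0]));
	return 0;
}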
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index d8563192ce5..0cdf6a46ba4 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -24,9 +24,6028 @@
24#include "radio_2056.h" 24#include "radio_2056.h"
25#include "phy_common.h" 25#include "phy_common.h"
26 26
27#define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
28 r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
29 r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
30 r30, r31, r32, r33, r34, r35, r36) \
31 .radio_syn_pll_vcocal1 = r00, \
32 .radio_syn_pll_vcocal2 = r01, \
33 .radio_syn_pll_refdiv = r02, \
34 .radio_syn_pll_mmd2 = r03, \
35 .radio_syn_pll_mmd1 = r04, \
36 .radio_syn_pll_loopfilter1 = r05, \
37 .radio_syn_pll_loopfilter2 = r06, \
38 .radio_syn_pll_loopfilter3 = r07, \
39 .radio_syn_pll_loopfilter4 = r08, \
40 .radio_syn_pll_loopfilter5 = r09, \
41 .radio_syn_reserved_addr27 = r10, \
42 .radio_syn_reserved_addr28 = r11, \
43 .radio_syn_reserved_addr29 = r12, \
44 .radio_syn_logen_vcobuf1 = r13, \
45 .radio_syn_logen_mixer2 = r14, \
46 .radio_syn_logen_buf3 = r15, \
47 .radio_syn_logen_buf4 = r16, \
48 .radio_rx0_lnaa_tune = r17, \
49 .radio_rx0_lnag_tune = r18, \
50 .radio_tx0_intpaa_boost_tune = r19, \
51 .radio_tx0_intpag_boost_tune = r20, \
52 .radio_tx0_pada_boost_tune = r21, \
53 .radio_tx0_padg_boost_tune = r22, \
54 .radio_tx0_pgaa_boost_tune = r23, \
55 .radio_tx0_pgag_boost_tune = r24, \
56 .radio_tx0_mixa_boost_tune = r25, \
57 .radio_tx0_mixg_boost_tune = r26, \
58 .radio_rx1_lnaa_tune = r27, \
59 .radio_rx1_lnag_tune = r28, \
60 .radio_tx1_intpaa_boost_tune = r29, \
61 .radio_tx1_intpag_boost_tune = r30, \
62 .radio_tx1_pada_boost_tune = r31, \
63 .radio_tx1_padg_boost_tune = r32, \
64 .radio_tx1_pgaa_boost_tune = r33, \
65 .radio_tx1_pgag_boost_tune = r34, \
66 .radio_tx1_mixa_boost_tune = r35, \
67 .radio_tx1_mixg_boost_tune = r36
68
69#define PHYREGS(r0, r1, r2, r3, r4, r5) \
70 .phy_regs.phy_bw1a = r0, \
71 .phy_regs.phy_bw2 = r1, \
72 .phy_regs.phy_bw3 = r2, \
73 .phy_regs.phy_bw4 = r3, \
74 .phy_regs.phy_bw5 = r4, \
75 .phy_regs.phy_bw6 = r5
76
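RADIOREGS3() and PHYREGS() above are plain shorthand for C99 designated initializers, so every entry in the table that follows fills named struct fields rather than relying on positional order. A much-reduced, hypothetical sketch of the same idea (the real struct is b43_nphy_channeltab_entry_rev3 and has far more fields):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, stripped-down stand-ins for the real structures and macros. */
struct phy_regs_mini {
	uint16_t phy_bw1a, phy_bw2, phy_bw3;
};

struct chan_entry_mini {
	uint16_t freq;
	uint8_t radio_syn_pll_vcocal1;
	struct phy_regs_mini phy_regs;
};

#define RADIOREGS_MINI(r00) \
	.radio_syn_pll_vcocal1 = r00

#define PHYREGS_MINI(r0, r1, r2) \
	.phy_regs.phy_bw1a = r0, \
	.phy_regs.phy_bw2 = r1, \
	.phy_regs.phy_bw3 = r2

static const struct chan_entry_mini tab[] = {
	{ .freq = 4920,
	  RADIOREGS_MINI(0xff),
	  PHYREGS_MINI(0x07b4, 0x07b0, 0x07ac),
	},
};

int main(void)
{
	printf("freq %u MHz: phy_bw1a 0x%04x\n",
	       (unsigned)tab[0].freq, (unsigned)tab[0].phy_regs.phy_bw1a);
	return 0;
}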
77/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
27static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = { 78static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
79 { .freq = 4920,
80 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
81 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
82 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
83 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
84 0x00, 0x0b, 0x00, 0xff, 0x00),
85 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
86 },
87 { .freq = 4930,
88 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
89 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
90 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
91 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
92 0x00, 0x0b, 0x00, 0xff, 0x00),
93 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
94 },
95 { .freq = 4940,
96 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
97 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
98 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
99 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
100 0x00, 0x0b, 0x00, 0xff, 0x00),
101 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
102 },
103 { .freq = 4950,
104 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
105 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
106 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
107 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
108 0x00, 0x0b, 0x00, 0xff, 0x00),
109 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
110 },
111 { .freq = 4960,
112 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
113 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
114 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
115 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
116 0x00, 0x0b, 0x00, 0xff, 0x00),
117 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
118 },
119 { .freq = 4970,
120 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
121 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
122 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
123 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
124 0x00, 0x0b, 0x00, 0xff, 0x00),
125 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
126 },
127 { .freq = 4980,
128 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
129 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
130 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
131 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
132 0x00, 0x0b, 0x00, 0xff, 0x00),
133 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
134 },
135 { .freq = 4990,
136 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
137 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
138 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
139 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
140 0x00, 0x0b, 0x00, 0xff, 0x00),
141 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
142 },
143 { .freq = 5000,
144 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
145 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
146 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
147 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
148 0x00, 0x0b, 0x00, 0xff, 0x00),
149 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
150 },
151 { .freq = 5010,
152 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
153 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
154 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
155 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
156 0x00, 0x0b, 0x00, 0xff, 0x00),
157 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
158 },
159 { .freq = 5020,
160 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
161 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
162 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
163 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
164 0x00, 0x0b, 0x00, 0xff, 0x00),
165 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
166 },
167 { .freq = 5030,
168 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
169 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
170 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
171 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
172 0x00, 0x0b, 0x00, 0xff, 0x00),
173 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
174 },
175 { .freq = 5040,
176 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
177 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
178 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
179 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
180 0x00, 0x0b, 0x00, 0xff, 0x00),
181 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
182 },
183 { .freq = 5050,
184 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
185 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
186 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
187 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
188 0x00, 0x0b, 0x00, 0xff, 0x00),
189 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
190 },
191 { .freq = 5060,
192 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
193 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
194 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
195 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
196 0x00, 0x0b, 0x00, 0xff, 0x00),
197 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
198 },
199 { .freq = 5070,
200 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
201 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
202 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
203 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
204 0x00, 0x0b, 0x00, 0xff, 0x00),
205 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
206 },
207 { .freq = 5080,
208 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
209 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
210 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
211 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
212 0x00, 0x0b, 0x00, 0xff, 0x00),
213 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
214 },
215 { .freq = 5090,
216 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
217 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
218 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
219 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
220 0x00, 0x0b, 0x00, 0xff, 0x00),
221 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
222 },
223 { .freq = 5100,
224 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
225 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
226 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
227 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
228 0x00, 0x0b, 0x00, 0xff, 0x00),
229 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
230 },
231 { .freq = 5110,
232 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
233 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
234 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
235 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
236 0x00, 0x0b, 0x00, 0xfc, 0x00),
237 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
238 },
239 { .freq = 5120,
240 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
241 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
242 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
243 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
244 0x00, 0x0b, 0x00, 0xfc, 0x00),
245 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
246 },
247 { .freq = 5130,
248 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
249 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
250 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
251 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
252 0x00, 0x0b, 0x00, 0xfc, 0x00),
253 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
254 },
255 { .freq = 5140,
256 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
257 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
258 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
259 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
260 0x00, 0x0b, 0x00, 0xfc, 0x00),
261 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
262 },
263 { .freq = 5160,
264 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
265 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
266 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
267 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
268 0x00, 0x0b, 0x00, 0xfc, 0x00),
269 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
270 },
271 { .freq = 5170,
272 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
273 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
274 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
275 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
276 0x00, 0x0b, 0x00, 0xfc, 0x00),
277 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
278 },
279 { .freq = 5180,
280 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
281 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
282 0xff, 0xef, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
283 0x00, 0xfc, 0x00, 0xef, 0x00, 0x07, 0x00, 0x7f,
284 0x00, 0x0b, 0x00, 0xfc, 0x00),
285 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
286 },
287 { .freq = 5190,
288 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
289 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
290 0xff, 0xef, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
291 0x00, 0xfc, 0x00, 0xef, 0x00, 0x07, 0x00, 0x7f,
292 0x00, 0x0b, 0x00, 0xfc, 0x00),
293 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
294 },
295 { .freq = 5200,
296 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
297 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
298 0xff, 0xef, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
299 0x00, 0xfc, 0x00, 0xef, 0x00, 0x06, 0x00, 0x7f,
300 0x00, 0x0a, 0x00, 0xfc, 0x00),
301 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
302 },
303 { .freq = 5210,
304 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
305 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
306 0xff, 0xdf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
307 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x06, 0x00, 0x7f,
308 0x00, 0x0a, 0x00, 0xfc, 0x00),
309 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
310 },
311 { .freq = 5220,
312 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
313 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
314 0xff, 0xdf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
315 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x06, 0x00, 0x7f,
316 0x00, 0x0a, 0x00, 0xfc, 0x00),
317 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
318 },
319 { .freq = 5230,
320 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
321 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
322 0xff, 0xdf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
323 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x06, 0x00, 0x7f,
324 0x00, 0x0a, 0x00, 0xfc, 0x00),
325 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
326 },
327 { .freq = 5240,
328 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
329 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
330 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
331 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
332 0x00, 0x0a, 0x00, 0xfc, 0x00),
333 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
334 },
335 { .freq = 5250,
336 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
337 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
338 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
339 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
340 0x00, 0x0a, 0x00, 0xfc, 0x00),
341 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
342 },
343 { .freq = 5260,
344 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
345 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
346 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
347 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
348 0x00, 0x0a, 0x00, 0xfc, 0x00),
349 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
350 },
351 { .freq = 5270,
352 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
353 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
354 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
355 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
356 0x00, 0x0a, 0x00, 0xfc, 0x00),
357 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
358 },
359 { .freq = 5280,
360 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
361 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
362 0xff, 0xbf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
363 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x06, 0x00, 0x7f,
364 0x00, 0x0a, 0x00, 0xfc, 0x00),
365 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
366 },
367 { .freq = 5290,
368 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
369 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
370 0xff, 0xbf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
371 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x06, 0x00, 0x7f,
372 0x00, 0x0a, 0x00, 0xfc, 0x00),
373 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
374 },
375 { .freq = 5300,
376 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
377 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
378 0xff, 0xbf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
379 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x05, 0x00, 0x7f,
380 0x00, 0x09, 0x00, 0xfc, 0x00),
381 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
382 },
383 { .freq = 5310,
384 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
385 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
386 0xff, 0xbf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
387 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x05, 0x00, 0x7f,
388 0x00, 0x09, 0x00, 0xfa, 0x00),
389 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
390 },
391 { .freq = 5320,
392 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
393 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
394 0xff, 0xbf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
395 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x05, 0x00, 0x7f,
396 0x00, 0x09, 0x00, 0xfa, 0x00),
397 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
398 },
399 { .freq = 5330,
400 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
401 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
402 0xff, 0xaf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
403 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x05, 0x00, 0x7f,
404 0x00, 0x09, 0x00, 0xfa, 0x00),
405 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
406 },
407 { .freq = 5340,
408 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
409 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
410 0xff, 0xaf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
411 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x05, 0x00, 0x7f,
412 0x00, 0x09, 0x00, 0xfa, 0x00),
413 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
414 },
415 { .freq = 5350,
416 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
417 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
418 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
419 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
420 0x00, 0x09, 0x00, 0xfa, 0x00),
421 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
422 },
423 { .freq = 5360,
424 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
425 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
426 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
427 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
428 0x00, 0x09, 0x00, 0xfa, 0x00),
429 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
430 },
431 { .freq = 5370,
432 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
433 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
434 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
435 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
436 0x00, 0x09, 0x00, 0xfa, 0x00),
437 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
438 },
439 { .freq = 5380,
440 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
441 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
442 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
443 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
444 0x00, 0x09, 0x00, 0xfa, 0x00),
445 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
446 },
447 { .freq = 5390,
448 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
449 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
450 0xff, 0x8f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
451 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x05, 0x00, 0x7f,
452 0x00, 0x09, 0x00, 0xfa, 0x00),
453 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
454 },
455 { .freq = 5400,
456 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
457 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
458 0xc8, 0x8f, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
459 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x04, 0x00, 0x7f,
460 0x00, 0x08, 0x00, 0xfa, 0x00),
461 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
462 },
463 { .freq = 5410,
464 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
465 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
466 0xc8, 0x8f, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
467 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x04, 0x00, 0x7f,
468 0x00, 0x08, 0x00, 0xfa, 0x00),
469 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
470 },
471 { .freq = 5420,
472 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
473 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
474 0xc8, 0x8e, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
475 0x00, 0xfa, 0x00, 0x8e, 0x00, 0x04, 0x00, 0x7f,
476 0x00, 0x08, 0x00, 0xfa, 0x00),
477 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
478 },
479 { .freq = 5430,
480 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
481 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
482 0xc8, 0x8e, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
483 0x00, 0xfa, 0x00, 0x8e, 0x00, 0x04, 0x00, 0x7f,
484 0x00, 0x08, 0x00, 0xfa, 0x00),
485 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
486 },
487 { .freq = 5440,
488 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
489 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
490 0xc8, 0x7e, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
491 0x00, 0xfa, 0x00, 0x7e, 0x00, 0x04, 0x00, 0x7f,
492 0x00, 0x08, 0x00, 0xfa, 0x00),
493 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
494 },
495 { .freq = 5450,
496 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
497 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
498 0xc8, 0x7d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
499 0x00, 0xfa, 0x00, 0x7d, 0x00, 0x04, 0x00, 0x7f,
500 0x00, 0x08, 0x00, 0xfa, 0x00),
501 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
502 },
503 { .freq = 5460,
504 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
505 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
506 0xc8, 0x6d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
507 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x04, 0x00, 0x7f,
508 0x00, 0x08, 0x00, 0xf8, 0x00),
509 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
510 },
511 { .freq = 5470,
512 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
513 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
514 0xc8, 0x6d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
515 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x04, 0x00, 0x7f,
516 0x00, 0x08, 0x00, 0xf8, 0x00),
517 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
518 },
519 { .freq = 5480,
520 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
521 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
522 0xc8, 0x5d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
523 0x00, 0xf8, 0x00, 0x5d, 0x00, 0x04, 0x00, 0x7f,
524 0x00, 0x08, 0x00, 0xf8, 0x00),
525 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
526 },
527 { .freq = 5490,
528 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
529 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
530 0xc8, 0x5c, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
531 0x00, 0xf8, 0x00, 0x5c, 0x00, 0x04, 0x00, 0x7f,
532 0x00, 0x08, 0x00, 0xf8, 0x00),
533 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
534 },
535 { .freq = 5500,
536 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
537 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
538 0x84, 0x5c, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
539 0x00, 0xf8, 0x00, 0x5c, 0x00, 0x03, 0x00, 0x7f,
540 0x00, 0x07, 0x00, 0xf8, 0x00),
541 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
542 },
543 { .freq = 5510,
544 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
545 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
546 0x84, 0x4c, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
547 0x00, 0xf8, 0x00, 0x4c, 0x00, 0x03, 0x00, 0x7f,
548 0x00, 0x07, 0x00, 0xf8, 0x00),
549 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
550 },
551 { .freq = 5520,
552 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
553 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
554 0x84, 0x4c, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
555 0x00, 0xf8, 0x00, 0x4c, 0x00, 0x03, 0x00, 0x7f,
556 0x00, 0x07, 0x00, 0xf8, 0x00),
557 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
558 },
559 { .freq = 5530,
560 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
561 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
562 0x84, 0x3b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
563 0x00, 0xf8, 0x00, 0x3b, 0x00, 0x03, 0x00, 0x7f,
564 0x00, 0x07, 0x00, 0xf8, 0x00),
565 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
566 },
567 { .freq = 5540,
568 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
569 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
570 0x84, 0x3b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
571 0x00, 0xf8, 0x00, 0x3b, 0x00, 0x03, 0x00, 0x7f,
572 0x00, 0x07, 0x00, 0xf8, 0x00),
573 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
574 },
575 { .freq = 5550,
576 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
577 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
578 0x84, 0x3b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
579 0x00, 0xf8, 0x00, 0x3b, 0x00, 0x03, 0x00, 0x7f,
580 0x00, 0x07, 0x00, 0xf8, 0x00),
581 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
582 },
583 { .freq = 5560,
584 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
585 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
586 0x84, 0x2b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
587 0x00, 0xf8, 0x00, 0x2b, 0x00, 0x03, 0x00, 0x7f,
588 0x00, 0x07, 0x00, 0xf8, 0x00),
589 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
590 },
591 { .freq = 5570,
592 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
593 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
594 0x84, 0x2a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
595 0x00, 0xf8, 0x00, 0x2a, 0x00, 0x03, 0x00, 0x7f,
596 0x00, 0x07, 0x00, 0xf8, 0x00),
597 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
598 },
599 { .freq = 5580,
600 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
601 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
602 0x84, 0x1a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
603 0x00, 0xf8, 0x00, 0x1a, 0x00, 0x03, 0x00, 0x7f,
604 0x00, 0x07, 0x00, 0xf8, 0x00),
605 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
606 },
607 { .freq = 5590,
608 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
609 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
610 0x84, 0x1a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
611 0x00, 0xf8, 0x00, 0x1a, 0x00, 0x03, 0x00, 0x7f,
612 0x00, 0x07, 0x00, 0xf8, 0x00),
613 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
614 },
615 { .freq = 5600,
616 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
617 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
618 0x70, 0x1a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
619 0x00, 0xf8, 0x00, 0x1a, 0x00, 0x03, 0x00, 0x7f,
620 0x00, 0x07, 0x00, 0xf8, 0x00),
621 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
622 },
623 { .freq = 5610,
624 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
625 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
626 0x70, 0x19, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
627 0x00, 0xf8, 0x00, 0x19, 0x00, 0x03, 0x00, 0x7f,
628 0x00, 0x07, 0x00, 0xf8, 0x00),
629 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
630 },
631 { .freq = 5620,
632 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
633 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
634 0x70, 0x19, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
635 0x00, 0xf8, 0x00, 0x19, 0x00, 0x03, 0x00, 0x7f,
636 0x00, 0x07, 0x00, 0xf8, 0x00),
637 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
638 },
639 { .freq = 5630,
640 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
641 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
642 0x70, 0x09, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
643 0x00, 0xf8, 0x00, 0x09, 0x00, 0x03, 0x00, 0x7f,
644 0x00, 0x07, 0x00, 0xf8, 0x00),
645 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
646 },
647 { .freq = 5640,
648 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
649 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
650 0x70, 0x09, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
651 0x00, 0xf8, 0x00, 0x09, 0x00, 0x03, 0x00, 0x7f,
652 0x00, 0x07, 0x00, 0xf8, 0x00),
653 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
654 },
655 { .freq = 5650,
656 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
657 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
658 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
659 0x00, 0xf8, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
660 0x00, 0x07, 0x00, 0xf8, 0x00),
661 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
662 },
663 { .freq = 5660,
664 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
665 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
666 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
667 0x00, 0xf6, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
668 0x00, 0x07, 0x00, 0xf6, 0x00),
669 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
670 },
671 { .freq = 5670,
672 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
673 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
674 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
675 0x00, 0xf6, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
676 0x00, 0x07, 0x00, 0xf6, 0x00),
677 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
678 },
679 { .freq = 5680,
680 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
681 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
682 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
683 0x00, 0xf6, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
684 0x00, 0x07, 0x00, 0xf6, 0x00),
685 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
686 },
687 { .freq = 5690,
688 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
689 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
690 0x70, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
691 0x00, 0xf6, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
692 0x00, 0x07, 0x00, 0xf6, 0x00),
693 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
694 },
695 { .freq = 5700,
696 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
697 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
698 0x40, 0x07, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
699 0x00, 0xf6, 0x00, 0x07, 0x00, 0x02, 0x00, 0x7f,
700 0x00, 0x06, 0x00, 0xf6, 0x00),
701 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
702 },
703 { .freq = 5710,
704 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
705 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
706 0x40, 0x07, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
707 0x00, 0xf4, 0x00, 0x07, 0x00, 0x02, 0x00, 0x7f,
708 0x00, 0x06, 0x00, 0xf4, 0x00),
709 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
710 },
711 { .freq = 5720,
712 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
713 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
714 0x40, 0x07, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
715 0x00, 0xf4, 0x00, 0x07, 0x00, 0x02, 0x00, 0x7f,
716 0x00, 0x06, 0x00, 0xf4, 0x00),
717 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
718 },
719 { .freq = 5725,
720 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
721 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
722 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
723 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
724 0x00, 0x06, 0x00, 0xf4, 0x00),
725 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
726 },
727 { .freq = 5730,
728 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
729 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
730 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
731 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
732 0x00, 0x06, 0x00, 0xf4, 0x00),
733 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
734 },
735 { .freq = 5735,
736 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
737 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
738 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
739 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
740 0x00, 0x06, 0x00, 0xf4, 0x00),
741 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
742 },
743 { .freq = 5740,
744 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
745 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
746 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
747 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
748 0x00, 0x06, 0x00, 0xf4, 0x00),
749 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
750 },
751 { .freq = 5745,
752 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
753 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
754 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
755 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
756 0x00, 0x06, 0x00, 0xf4, 0x00),
757 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
758 },
759 { .freq = 5750,
760 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
761 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
762 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
763 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
764 0x00, 0x06, 0x00, 0xf4, 0x00),
765 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
766 },
767 { .freq = 5755,
768 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
769 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
770 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
771 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
772 0x00, 0x06, 0x00, 0xf4, 0x00),
773 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
774 },
775 { .freq = 5760,
776 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
777 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
778 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
779 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
780 0x00, 0x06, 0x00, 0xf4, 0x00),
781 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
782 },
783 { .freq = 5765,
784 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
785 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
786 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
787 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
788 0x00, 0x06, 0x00, 0xf4, 0x00),
789 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
790 },
791 { .freq = 5770,
792 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
793 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
794 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
795 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
796 0x00, 0x06, 0x00, 0xf4, 0x00),
797 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
798 },
799 { .freq = 5775,
800 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
801 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
802 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
803 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
804 0x00, 0x06, 0x00, 0xf4, 0x00),
805 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
806 },
807 { .freq = 5780,
808 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
809 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
810 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
811 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
812 0x00, 0x06, 0x00, 0xf4, 0x00),
813 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
814 },
815 { .freq = 5785,
816 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
817 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
818 0x40, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
819 0x00, 0xf4, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
820 0x00, 0x06, 0x00, 0xf4, 0x00),
821 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
822 },
823 { .freq = 5790,
824 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
825 0x0c, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
826 0x40, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
827 0x00, 0xf4, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
828 0x00, 0x06, 0x00, 0xf4, 0x00),
829 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
830 },
831 { .freq = 5795,
832 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
833 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
834 0x40, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
835 0x00, 0xf4, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
836 0x00, 0x06, 0x00, 0xf4, 0x00),
837 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
838 },
839 { .freq = 5800,
840 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
841 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
842 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
843 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
844 0x00, 0x06, 0x00, 0xf4, 0x00),
845 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
846 },
847 { .freq = 5805,
848 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
849 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
850 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
851 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
852 0x00, 0x06, 0x00, 0xf4, 0x00),
853 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
854 },
855 { .freq = 5810,
856 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
857 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
858 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
859 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
860 0x00, 0x06, 0x00, 0xf4, 0x00),
861 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
862 },
863 { .freq = 5815,
864 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
865 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
866 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
867 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
868 0x00, 0x06, 0x00, 0xf4, 0x00),
869 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
870 },
871 { .freq = 5820,
872 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
873 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
874 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
875 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
876 0x00, 0x06, 0x00, 0xf4, 0x00),
877 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
878 },
879 { .freq = 5825,
880 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
881 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
882 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
883 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
884 0x00, 0x06, 0x00, 0xf4, 0x00),
885 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
886 },
887 { .freq = 5830,
888 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
889 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
890 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
891 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
892 0x00, 0x06, 0x00, 0xf4, 0x00),
893 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
894 },
895 { .freq = 5840,
896 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
897 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
898 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
899 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
900 0x00, 0x06, 0x00, 0xf4, 0x00),
901 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
902 },
903 { .freq = 5850,
904 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
905 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
906 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
907 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
908 0x00, 0x06, 0x00, 0xf4, 0x00),
909 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
910 },
911 { .freq = 5860,
912 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
913 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
914 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
915 0x00, 0xf2, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
916 0x00, 0x06, 0x00, 0xf2, 0x00),
917 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
918 },
919 { .freq = 5870,
920 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
921 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
922 0x20, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
923 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
924 0x00, 0x06, 0x00, 0xf2, 0x00),
925 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
926 },
927 { .freq = 5880,
928 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
929 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
930 0x20, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
931 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
932 0x00, 0x06, 0x00, 0xf2, 0x00),
933 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
934 },
935 { .freq = 5890,
936 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
937 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
938 0x20, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
939 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
940 0x00, 0x06, 0x00, 0xf2, 0x00),
941 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
942 },
943 { .freq = 5900,
944 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
945 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
946 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x05,
947 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
948 0x00, 0x05, 0x00, 0xf2, 0x00),
949 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
950 },
951 { .freq = 5910,
952 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
953 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
954 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x05,
955 0x00, 0xf2, 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f,
956 0x00, 0x05, 0x00, 0xf2, 0x00),
957 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
958 },
959 { .freq = 2412,
960 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
961 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
962 0x00, 0x00, 0xff, 0x00, 0x05, 0x00, 0x70, 0x00,
963 0x0f, 0x00, 0x0f, 0x00, 0xff, 0x00, 0x05, 0x00,
964 0x70, 0x00, 0x0f, 0x00, 0x0f),
965 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
966 },
967 { .freq = 2417,
968 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
969 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
970 0x00, 0x00, 0xff, 0x00, 0x05, 0x00, 0x70, 0x00,
971 0x0f, 0x00, 0x0f, 0x00, 0xff, 0x00, 0x05, 0x00,
972 0x70, 0x00, 0x0f, 0x00, 0x0f),
973 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
974 },
975 { .freq = 2422,
976 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
977 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
978 0x00, 0x00, 0xff, 0x00, 0x05, 0x00, 0x70, 0x00,
979 0x0f, 0x00, 0x0f, 0x00, 0xff, 0x00, 0x05, 0x00,
980 0x70, 0x00, 0x0f, 0x00, 0x0f),
981 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
982 },
983 { .freq = 2427,
984 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
985 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
986 0x00, 0x00, 0xfd, 0x00, 0x05, 0x00, 0x70, 0x00,
987 0x0f, 0x00, 0x0f, 0x00, 0xfd, 0x00, 0x05, 0x00,
988 0x70, 0x00, 0x0f, 0x00, 0x0f),
989 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
990 },
991 { .freq = 2432,
992 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
993 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
994 0x00, 0x00, 0xfb, 0x00, 0x05, 0x00, 0x70, 0x00,
995 0x0f, 0x00, 0x0f, 0x00, 0xfb, 0x00, 0x05, 0x00,
996 0x70, 0x00, 0x0f, 0x00, 0x0f),
997 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
998 },
999 { .freq = 2437,
1000 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
1001 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
1002 0x00, 0x00, 0xfa, 0x00, 0x05, 0x00, 0x70, 0x00,
1003 0x0f, 0x00, 0x0f, 0x00, 0xfa, 0x00, 0x05, 0x00,
1004 0x70, 0x00, 0x0f, 0x00, 0x0f),
1005 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
1006 },
1007 { .freq = 2442,
1008 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
1009 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
1010 0x00, 0x00, 0xf8, 0x00, 0x05, 0x00, 0x70, 0x00,
1011 0x0f, 0x00, 0x0f, 0x00, 0xf8, 0x00, 0x05, 0x00,
1012 0x70, 0x00, 0x0f, 0x00, 0x0f),
1013 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
1014 },
1015 { .freq = 2447,
1016 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
1017 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
1018 0x00, 0x00, 0xf7, 0x00, 0x05, 0x00, 0x70, 0x00,
1019 0x0f, 0x00, 0x0f, 0x00, 0xf7, 0x00, 0x05, 0x00,
1020 0x70, 0x00, 0x0f, 0x00, 0x0f),
1021 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
1022 },
1023 { .freq = 2452,
1024 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
1025 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
1026 0x00, 0x00, 0xf6, 0x00, 0x05, 0x00, 0x70, 0x00,
1027 0x0f, 0x00, 0x0f, 0x00, 0xf6, 0x00, 0x05, 0x00,
1028 0x70, 0x00, 0x0f, 0x00, 0x0f),
1029 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
1030 },
1031 { .freq = 2457,
1032 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
1033 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
1034 0x00, 0x00, 0xf5, 0x00, 0x05, 0x00, 0x70, 0x00,
1035 0x0f, 0x00, 0x0d, 0x00, 0xf5, 0x00, 0x05, 0x00,
1036 0x70, 0x00, 0x0f, 0x00, 0x0d),
1037 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
1038 },
1039 { .freq = 2462,
1040 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
1041 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
1042 0x00, 0x00, 0xf4, 0x00, 0x05, 0x00, 0x70, 0x00,
1043 0x0f, 0x00, 0x0d, 0x00, 0xf4, 0x00, 0x05, 0x00,
1044 0x70, 0x00, 0x0f, 0x00, 0x0d),
1045 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
1046 },
1047 { .freq = 2467,
1048 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
1049 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
1050 0x00, 0x00, 0xf3, 0x00, 0x05, 0x00, 0x70, 0x00,
1051 0x0f, 0x00, 0x0d, 0x00, 0xf3, 0x00, 0x05, 0x00,
1052 0x70, 0x00, 0x0f, 0x00, 0x0d),
1053 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
1054 },
1055 { .freq = 2472,
1056 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
1057 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
1058 0x00, 0x00, 0xf2, 0x00, 0x05, 0x00, 0x70, 0x00,
1059 0x0f, 0x00, 0x0d, 0x00, 0xf2, 0x00, 0x05, 0x00,
1060 0x70, 0x00, 0x0f, 0x00, 0x0d),
1061 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
1062 },
1063 { .freq = 2484,
1064 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
1065 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
1066 0x00, 0x00, 0xf0, 0x00, 0x05, 0x00, 0x70, 0x00,
1067 0x0f, 0x00, 0x0d, 0x00, 0xf0, 0x00, 0x05, 0x00,
1068 0x70, 0x00, 0x0f, 0x00, 0x0d),
1069 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
1070 },
1071};
1072
1073static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] = {
1074 { .freq = 4920,
1075 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
1076 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
1077 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
1078 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
1079 0x00, 0x0f, 0x00, 0xff, 0x00),
1080 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
1081 },
1082 { .freq = 4930,
1083 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
1084 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
1085 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
1086 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
1087 0x00, 0x0f, 0x00, 0xff, 0x00),
1088 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
1089 },
1090 { .freq = 4940,
1091 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
1092 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
1093 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
1094 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
1095 0x00, 0x0f, 0x00, 0xff, 0x00),
1096 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
1097 },
1098 { .freq = 4950,
1099 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
1100 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
1101 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
1102 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
1103 0x00, 0x0f, 0x00, 0xff, 0x00),
1104 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
1105 },
1106 { .freq = 4960,
1107 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
1108 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1109 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
1110 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
1111 0x00, 0x0f, 0x00, 0xff, 0x00),
1112 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
1113 },
1114 { .freq = 4970,
1115 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
1116 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1117 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
1118 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
1119 0x00, 0x0f, 0x00, 0xff, 0x00),
1120 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
1121 },
1122 { .freq = 4980,
1123 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
1124 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1125 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
1126 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
1127 0x00, 0x0f, 0x00, 0xff, 0x00),
1128 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
1129 },
1130 { .freq = 4990,
1131 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
1132 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1133 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
1134 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
1135 0x00, 0x0f, 0x00, 0xff, 0x00),
1136 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
1137 },
1138 { .freq = 5000,
1139 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
1140 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1141 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
1142 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
1143 0x00, 0x0f, 0x00, 0xff, 0x00),
1144 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
1145 },
1146 { .freq = 5010,
1147 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
1148 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1149 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
1150 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
1151 0x00, 0x0f, 0x00, 0xff, 0x00),
1152 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
1153 },
1154 { .freq = 5020,
1155 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
1156 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1157 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
1158 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
1159 0x00, 0x0f, 0x00, 0xff, 0x00),
1160 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
1161 },
1162 { .freq = 5030,
1163 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
1164 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1165 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
1166 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
1167 0x00, 0x0f, 0x00, 0xff, 0x00),
1168 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
1169 },
1170 { .freq = 5040,
1171 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
1172 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1173 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
1174 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
1175 0x00, 0x0f, 0x00, 0xff, 0x00),
1176 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
1177 },
1178 { .freq = 5050,
1179 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
1180 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1181 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
1182 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
1183 0x00, 0x0f, 0x00, 0xff, 0x00),
1184 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
1185 },
1186 { .freq = 5060,
1187 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
1188 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1189 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
1190 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
1191 0x00, 0x0f, 0x00, 0xff, 0x00),
1192 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
1193 },
1194 { .freq = 5070,
1195 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
1196 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1197 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
1198 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
1199 0x00, 0x0f, 0x00, 0xff, 0x00),
1200 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
1201 },
1202 { .freq = 5080,
1203 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
1204 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1205 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
1206 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
1207 0x00, 0x0f, 0x00, 0xff, 0x00),
1208 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
1209 },
1210 { .freq = 5090,
1211 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
1212 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
1213 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
1214 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
1215 0x00, 0x0f, 0x00, 0xff, 0x00),
1216 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
1217 },
1218 { .freq = 5100,
1219 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
1220 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1221 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
1222 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
1223 0x00, 0x0f, 0x00, 0xfe, 0x00),
1224 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
1225 },
1226 { .freq = 5110,
1227 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
1228 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1229 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
1230 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
1231 0x00, 0x0f, 0x00, 0xfe, 0x00),
1232 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
1233 },
1234 { .freq = 5120,
1235 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
1236 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1237 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
1238 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
1239 0x00, 0x0f, 0x00, 0xfe, 0x00),
1240 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
1241 },
1242 { .freq = 5130,
1243 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
1244 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1245 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
1246 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
1247 0x00, 0x0f, 0x00, 0xfe, 0x00),
1248 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
1249 },
1250 { .freq = 5140,
1251 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
1252 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1253 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
1254 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
1255 0x00, 0x0f, 0x00, 0xfe, 0x00),
1256 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
1257 },
1258 { .freq = 5160,
1259 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
1260 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1261 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
1262 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
1263 0x00, 0x0f, 0x00, 0xfe, 0x00),
1264 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
1265 },
1266 { .freq = 5170,
1267 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
1268 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1269 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
1270 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
1271 0x00, 0x0f, 0x00, 0xfe, 0x00),
1272 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
1273 },
1274 { .freq = 5180,
1275 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
1276 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1277 0xff, 0xef, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
1278 0x00, 0xfe, 0x00, 0xef, 0x00, 0x0c, 0x00, 0x7f,
1279 0x00, 0x0f, 0x00, 0xfe, 0x00),
1280 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
1281 },
1282 { .freq = 5190,
1283 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
1284 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1285 0xff, 0xef, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
1286 0x00, 0xfe, 0x00, 0xef, 0x00, 0x0c, 0x00, 0x7f,
1287 0x00, 0x0f, 0x00, 0xfe, 0x00),
1288 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
1289 },
1290 { .freq = 5200,
1291 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
1292 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1293 0xff, 0xef, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
1294 0x00, 0xfc, 0x00, 0xef, 0x00, 0x0a, 0x00, 0x7f,
1295 0x00, 0x0f, 0x00, 0xfc, 0x00),
1296 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
1297 },
1298 { .freq = 5210,
1299 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
1300 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1301 0xff, 0xdf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
1302 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x0a, 0x00, 0x7f,
1303 0x00, 0x0f, 0x00, 0xfc, 0x00),
1304 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
1305 },
1306 { .freq = 5220,
1307 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
1308 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1309 0xff, 0xdf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
1310 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x0a, 0x00, 0x7f,
1311 0x00, 0x0f, 0x00, 0xfc, 0x00),
1312 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
1313 },
1314 { .freq = 5230,
1315 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
1316 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1317 0xff, 0xdf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
1318 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x0a, 0x00, 0x7f,
1319 0x00, 0x0f, 0x00, 0xfc, 0x00),
1320 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
1321 },
1322 { .freq = 5240,
1323 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
1324 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1325 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
1326 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
1327 0x00, 0x0f, 0x00, 0xfc, 0x00),
1328 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
1329 },
1330 { .freq = 5250,
1331 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
1332 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1333 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
1334 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
1335 0x00, 0x0f, 0x00, 0xfc, 0x00),
1336 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
1337 },
1338 { .freq = 5260,
1339 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
1340 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
1341 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
1342 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
1343 0x00, 0x0f, 0x00, 0xfc, 0x00),
1344 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
1345 },
1346 { .freq = 5270,
1347 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
1348 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
1349 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
1350 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
1351 0x00, 0x0f, 0x00, 0xfc, 0x00),
1352 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
1353 },
1354 { .freq = 5280,
1355 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
1356 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
1357 0xff, 0xbf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
1358 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x0a, 0x00, 0x7f,
1359 0x00, 0x0f, 0x00, 0xfc, 0x00),
1360 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
1361 },
1362 { .freq = 5290,
1363 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
1364 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
1365 0xff, 0xbf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
1366 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x0a, 0x00, 0x7f,
1367 0x00, 0x0f, 0x00, 0xfc, 0x00),
1368 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
1369 },
1370 { .freq = 5300,
1371 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
1372 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
1373 0xff, 0xbf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
1374 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x08, 0x00, 0x7f,
1375 0x00, 0x0f, 0x00, 0xfa, 0x00),
1376 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
1377 },
1378 { .freq = 5310,
1379 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
1380 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
1381 0xff, 0xbf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
1382 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x08, 0x00, 0x7f,
1383 0x00, 0x0f, 0x00, 0xfa, 0x00),
1384 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
1385 },
1386 { .freq = 5320,
1387 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
1388 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
1389 0xff, 0xbf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
1390 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x08, 0x00, 0x7f,
1391 0x00, 0x0f, 0x00, 0xfa, 0x00),
1392 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
1393 },
1394 { .freq = 5330,
1395 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
1396 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
1397 0xff, 0xaf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
1398 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x08, 0x00, 0x7f,
1399 0x00, 0x0f, 0x00, 0xfa, 0x00),
1400 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
1401 },
1402 { .freq = 5340,
1403 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
1404 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
1405 0xff, 0xaf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
1406 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x08, 0x00, 0x7f,
1407 0x00, 0x0f, 0x00, 0xfa, 0x00),
1408 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
1409 },
1410 { .freq = 5350,
1411 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
1412 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
1413 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
1414 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
1415 0x00, 0x0f, 0x00, 0xfa, 0x00),
1416 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
1417 },
1418 { .freq = 5360,
1419 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
1420 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
1421 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
1422 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
1423 0x00, 0x0f, 0x00, 0xfa, 0x00),
1424 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
1425 },
1426 { .freq = 5370,
1427 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
1428 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
1429 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
1430 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
1431 0x00, 0x0f, 0x00, 0xfa, 0x00),
1432 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
1433 },
1434 { .freq = 5380,
1435 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
1436 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
1437 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
1438 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
1439 0x00, 0x0f, 0x00, 0xfa, 0x00),
1440 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
1441 },
1442 { .freq = 5390,
1443 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
1444 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
1445 0xff, 0x8f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
1446 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x08, 0x00, 0x7f,
1447 0x00, 0x0f, 0x00, 0xfa, 0x00),
1448 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
1449 },
1450 { .freq = 5400,
1451 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
1452 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
1453 0xc8, 0x8f, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
1454 0x00, 0xf8, 0x00, 0x8f, 0x00, 0x07, 0x00, 0x7f,
1455 0x00, 0x0f, 0x00, 0xf8, 0x00),
1456 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
1457 },
1458 { .freq = 5410,
1459 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
1460 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
1461 0xc8, 0x8f, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
1462 0x00, 0xf8, 0x00, 0x8f, 0x00, 0x07, 0x00, 0x7f,
1463 0x00, 0x0f, 0x00, 0xf8, 0x00),
1464 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
1465 },
1466 { .freq = 5420,
1467 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
1468 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
1469 0xc8, 0x8e, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
1470 0x00, 0xf8, 0x00, 0x8e, 0x00, 0x07, 0x00, 0x7f,
1471 0x00, 0x0f, 0x00, 0xf8, 0x00),
1472 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
1473 },
1474 { .freq = 5430,
1475 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
1476 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
1477 0xc8, 0x8e, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
1478 0x00, 0xf8, 0x00, 0x8e, 0x00, 0x07, 0x00, 0x7f,
1479 0x00, 0x0f, 0x00, 0xf8, 0x00),
1480 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
1481 },
1482 { .freq = 5440,
1483 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
1484 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
1485 0xc8, 0x7e, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
1486 0x00, 0xf8, 0x00, 0x7e, 0x00, 0x07, 0x00, 0x7f,
1487 0x00, 0x0f, 0x00, 0xf8, 0x00),
1488 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
1489 },
1490 { .freq = 5450,
1491 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
1492 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
1493 0xc8, 0x7d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
1494 0x00, 0xf8, 0x00, 0x7d, 0x00, 0x07, 0x00, 0x7f,
1495 0x00, 0x0f, 0x00, 0xf8, 0x00),
1496 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
1497 },
1498 { .freq = 5460,
1499 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
1500 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
1501 0xc8, 0x6d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
1502 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x07, 0x00, 0x7f,
1503 0x00, 0x0f, 0x00, 0xf8, 0x00),
1504 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
1505 },
1506 { .freq = 5470,
1507 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
1508 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
1509 0xc8, 0x6d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
1510 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x07, 0x00, 0x7f,
1511 0x00, 0x0f, 0x00, 0xf8, 0x00),
1512 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
1513 },
1514 { .freq = 5480,
1515 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
1516 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
1517 0xc8, 0x5d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
1518 0x00, 0xf8, 0x00, 0x5d, 0x00, 0x07, 0x00, 0x7f,
1519 0x00, 0x0f, 0x00, 0xf8, 0x00),
1520 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
1521 },
1522 { .freq = 5490,
1523 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
1524 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
1525 0xc8, 0x5c, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
1526 0x00, 0xf8, 0x00, 0x5c, 0x00, 0x07, 0x00, 0x7f,
1527 0x00, 0x0f, 0x00, 0xf8, 0x00),
1528 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
1529 },
1530 { .freq = 5500,
1531 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
1532 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
1533 0x84, 0x5c, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
1534 0x00, 0xf6, 0x00, 0x5c, 0x00, 0x06, 0x00, 0x7f,
1535 0x00, 0x0d, 0x00, 0xf6, 0x00),
1536 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
1537 },
1538 { .freq = 5510,
1539 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
1540 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
1541 0x84, 0x4c, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
1542 0x00, 0xf6, 0x00, 0x4c, 0x00, 0x06, 0x00, 0x7f,
1543 0x00, 0x0d, 0x00, 0xf6, 0x00),
1544 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
1545 },
1546 { .freq = 5520,
1547 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
1548 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
1549 0x84, 0x4c, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
1550 0x00, 0xf6, 0x00, 0x4c, 0x00, 0x06, 0x00, 0x7f,
1551 0x00, 0x0d, 0x00, 0xf6, 0x00),
1552 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
1553 },
1554 { .freq = 5530,
1555 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
1556 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
1557 0x84, 0x3b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
1558 0x00, 0xf6, 0x00, 0x3b, 0x00, 0x06, 0x00, 0x7f,
1559 0x00, 0x0d, 0x00, 0xf6, 0x00),
1560 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
1561 },
1562 { .freq = 5540,
1563 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
1564 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
1565 0x84, 0x3b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
1566 0x00, 0xf6, 0x00, 0x3b, 0x00, 0x06, 0x00, 0x7f,
1567 0x00, 0x0d, 0x00, 0xf6, 0x00),
1568 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
1569 },
1570 { .freq = 5550,
1571 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
1572 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
1573 0x84, 0x3b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
1574 0x00, 0xf6, 0x00, 0x3b, 0x00, 0x06, 0x00, 0x7f,
1575 0x00, 0x0d, 0x00, 0xf6, 0x00),
1576 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
1577 },
1578 { .freq = 5560,
1579 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
1580 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
1581 0x84, 0x2b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
1582 0x00, 0xf6, 0x00, 0x2b, 0x00, 0x06, 0x00, 0x7f,
1583 0x00, 0x0d, 0x00, 0xf6, 0x00),
1584 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
1585 },
1586 { .freq = 5570,
1587 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
1588 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
1589 0x84, 0x2a, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
1590 0x00, 0xf6, 0x00, 0x2a, 0x00, 0x06, 0x00, 0x7f,
1591 0x00, 0x0d, 0x00, 0xf6, 0x00),
1592 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
1593 },
1594 { .freq = 5580,
1595 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
1596 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
1597 0x84, 0x1a, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
1598 0x00, 0xf6, 0x00, 0x1a, 0x00, 0x06, 0x00, 0x7f,
1599 0x00, 0x0d, 0x00, 0xf6, 0x00),
1600 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
1601 },
1602 { .freq = 5590,
1603 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
1604 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
1605 0x84, 0x1a, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
1606 0x00, 0xf6, 0x00, 0x1a, 0x00, 0x06, 0x00, 0x7f,
1607 0x00, 0x0d, 0x00, 0xf6, 0x00),
1608 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
1609 },
1610 { .freq = 5600,
1611 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
1612 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
1613 0x70, 0x1a, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
1614 0x00, 0xf4, 0x00, 0x1a, 0x00, 0x04, 0x00, 0x7f,
1615 0x00, 0x0b, 0x00, 0xf4, 0x00),
1616 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
1617 },
1618 { .freq = 5610,
1619 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
1620 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
1621 0x70, 0x19, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
1622 0x00, 0xf4, 0x00, 0x19, 0x00, 0x04, 0x00, 0x7f,
1623 0x00, 0x0b, 0x00, 0xf4, 0x00),
1624 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
1625 },
1626 { .freq = 5620,
1627 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
1628 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
1629 0x70, 0x19, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
1630 0x00, 0xf4, 0x00, 0x19, 0x00, 0x04, 0x00, 0x7f,
1631 0x00, 0x0b, 0x00, 0xf4, 0x00),
1632 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
1633 },
1634 { .freq = 5630,
1635 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
1636 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
1637 0x70, 0x09, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
1638 0x00, 0xf4, 0x00, 0x09, 0x00, 0x04, 0x00, 0x7f,
1639 0x00, 0x0b, 0x00, 0xf4, 0x00),
1640 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
1641 },
1642 { .freq = 5640,
1643 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
1644 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
1645 0x70, 0x09, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
1646 0x00, 0xf4, 0x00, 0x09, 0x00, 0x04, 0x00, 0x7f,
1647 0x00, 0x0b, 0x00, 0xf4, 0x00),
1648 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
1649 },
1650 { .freq = 5650,
1651 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
1652 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
1653 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
1654 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
1655 0x00, 0x0b, 0x00, 0xf4, 0x00),
1656 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
1657 },
1658 { .freq = 5660,
1659 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
1660 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
1661 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
1662 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
1663 0x00, 0x0b, 0x00, 0xf4, 0x00),
1664 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
1665 },
1666 { .freq = 5670,
1667 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
1668 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
1669 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
1670 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
1671 0x00, 0x0b, 0x00, 0xf4, 0x00),
1672 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
1673 },
1674 { .freq = 5680,
1675 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
1676 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
1677 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
1678 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
1679 0x00, 0x0b, 0x00, 0xf4, 0x00),
1680 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
1681 },
1682 { .freq = 5690,
1683 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
1684 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
1685 0x70, 0x07, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
1686 0x00, 0xf4, 0x00, 0x07, 0x00, 0x04, 0x00, 0x7f,
1687 0x00, 0x0b, 0x00, 0xf4, 0x00),
1688 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
1689 },
1690 { .freq = 5700,
1691 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
1692 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1693 0x40, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1694 0x00, 0xf2, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
1695 0x00, 0x0a, 0x00, 0xf2, 0x00),
1696 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
1697 },
1698 { .freq = 5710,
1699 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
1700 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1701 0x40, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1702 0x00, 0xf2, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
1703 0x00, 0x0a, 0x00, 0xf2, 0x00),
1704 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
1705 },
1706 { .freq = 5720,
1707 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
1708 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1709 0x40, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1710 0x00, 0xf2, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
1711 0x00, 0x0a, 0x00, 0xf2, 0x00),
1712 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
1713 },
1714 { .freq = 5725,
1715 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
1716 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1717 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1718 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
1719 0x00, 0x0a, 0x00, 0xf2, 0x00),
1720 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
1721 },
1722 { .freq = 5730,
1723 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
1724 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1725 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1726 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
1727 0x00, 0x0a, 0x00, 0xf2, 0x00),
1728 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
1729 },
1730 { .freq = 5735,
1731 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
1732 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1733 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1734 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
1735 0x00, 0x0a, 0x00, 0xf2, 0x00),
1736 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
1737 },
1738 { .freq = 5740,
1739 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
1740 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1741 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1742 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
1743 0x00, 0x0a, 0x00, 0xf2, 0x00),
1744 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
1745 },
1746 { .freq = 5745,
1747 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
1748 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1749 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1750 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
1751 0x00, 0x0a, 0x00, 0xf2, 0x00),
1752 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
1753 },
1754 { .freq = 5750,
1755 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
1756 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1757 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1758 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
1759 0x00, 0x0a, 0x00, 0xf2, 0x00),
1760 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
1761 },
1762 { .freq = 5755,
1763 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
1764 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1765 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1766 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
1767 0x00, 0x0a, 0x00, 0xf2, 0x00),
1768 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
1769 },
1770 { .freq = 5760,
1771 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
1772 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1773 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1774 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
1775 0x00, 0x0a, 0x00, 0xf2, 0x00),
1776 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
1777 },
1778 { .freq = 5765,
1779 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
1780 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1781 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1782 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
1783 0x00, 0x0a, 0x00, 0xf2, 0x00),
1784 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
1785 },
1786 { .freq = 5770,
1787 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
1788 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1789 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1790 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
1791 0x00, 0x0a, 0x00, 0xf2, 0x00),
1792 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
1793 },
1794 { .freq = 5775,
1795 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
1796 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1797 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1798 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
1799 0x00, 0x0a, 0x00, 0xf2, 0x00),
1800 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
1801 },
1802 { .freq = 5780,
1803 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
1804 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
1805 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1806 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
1807 0x00, 0x0a, 0x00, 0xf2, 0x00),
1808 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
1809 },
1810 { .freq = 5785,
1811 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
1812 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
1813 0x40, 0x04, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1814 0x00, 0xf2, 0x00, 0x04, 0x00, 0x03, 0x00, 0x7f,
1815 0x00, 0x0a, 0x00, 0xf2, 0x00),
1816 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
1817 },
1818 { .freq = 5790,
1819 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
1820 0x0c, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
1821 0x40, 0x04, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1822 0x00, 0xf2, 0x00, 0x04, 0x00, 0x03, 0x00, 0x7f,
1823 0x00, 0x0a, 0x00, 0xf2, 0x00),
1824 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
1825 },
1826 { .freq = 5795,
1827 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
1828 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
1829 0x40, 0x04, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
1830 0x00, 0xf2, 0x00, 0x04, 0x00, 0x03, 0x00, 0x7f,
1831 0x00, 0x0a, 0x00, 0xf2, 0x00),
1832 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
1833 },
1834 { .freq = 5800,
1835 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
1836 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1837 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1838 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
1839 0x00, 0x09, 0x00, 0xf0, 0x00),
1840 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
1841 },
1842 { .freq = 5805,
1843 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
1844 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1845 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1846 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
1847 0x00, 0x09, 0x00, 0xf0, 0x00),
1848 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
1849 },
1850 { .freq = 5810,
1851 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
1852 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1853 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1854 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
1855 0x00, 0x09, 0x00, 0xf0, 0x00),
1856 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
1857 },
1858 { .freq = 5815,
1859 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
1860 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1861 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1862 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
1863 0x00, 0x09, 0x00, 0xf0, 0x00),
1864 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
1865 },
1866 { .freq = 5820,
1867 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
1868 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1869 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1870 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
1871 0x00, 0x09, 0x00, 0xf0, 0x00),
1872 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
1873 },
1874 { .freq = 5825,
1875 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
1876 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1877 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1878 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
1879 0x00, 0x09, 0x00, 0xf0, 0x00),
1880 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
1881 },
1882 { .freq = 5830,
1883 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
1884 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1885 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1886 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
1887 0x00, 0x09, 0x00, 0xf0, 0x00),
1888 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
1889 },
1890 { .freq = 5840,
1891 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
1892 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1893 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1894 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
1895 0x00, 0x09, 0x00, 0xf0, 0x00),
1896 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
1897 },
1898 { .freq = 5850,
1899 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
1900 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1901 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1902 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
1903 0x00, 0x09, 0x00, 0xf0, 0x00),
1904 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
1905 },
1906 { .freq = 5860,
1907 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
1908 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1909 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1910 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
1911 0x00, 0x09, 0x00, 0xf0, 0x00),
1912 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
1913 },
1914 { .freq = 5870,
1915 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
1916 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1917 0x20, 0x02, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1918 0x00, 0xf0, 0x00, 0x02, 0x00, 0x02, 0x00, 0x7f,
1919 0x00, 0x09, 0x00, 0xf0, 0x00),
1920 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
1921 },
1922 { .freq = 5880,
1923 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
1924 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1925 0x20, 0x02, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1926 0x00, 0xf0, 0x00, 0x02, 0x00, 0x02, 0x00, 0x7f,
1927 0x00, 0x09, 0x00, 0xf0, 0x00),
1928 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
1929 },
1930 { .freq = 5890,
1931 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
1932 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
1933 0x20, 0x02, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
1934 0x00, 0xf0, 0x00, 0x02, 0x00, 0x02, 0x00, 0x7f,
1935 0x00, 0x09, 0x00, 0xf0, 0x00),
1936 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
1937 },
1938 { .freq = 5900,
1939 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
1940 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
1941 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x07,
1942 0x00, 0xf0, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
1943 0x00, 0x07, 0x00, 0xf0, 0x00),
1944 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
1945 },
1946 { .freq = 5910,
1947 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
1948 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
1949 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x07,
1950 0x00, 0xf0, 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f,
1951 0x00, 0x07, 0x00, 0xf0, 0x00),
1952 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
1953 },
1954 { .freq = 2412,
1955 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
1956 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
1957 0x00, 0x00, 0xff, 0x00, 0x04, 0x00, 0x70, 0x00,
1958 0x0f, 0x00, 0x0e, 0x00, 0xff, 0x00, 0x04, 0x00,
1959 0x70, 0x00, 0x0f, 0x00, 0x0e),
1960 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
1961 },
1962 { .freq = 2417,
1963 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
1964 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
1965 0x00, 0x00, 0xff, 0x00, 0x04, 0x00, 0x70, 0x00,
1966 0x0f, 0x00, 0x0e, 0x00, 0xff, 0x00, 0x04, 0x00,
1967 0x70, 0x00, 0x0f, 0x00, 0x0e),
1968 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
1969 },
1970 { .freq = 2422,
1971 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
1972 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
1973 0x00, 0x00, 0xff, 0x00, 0x04, 0x00, 0x70, 0x00,
1974 0x0f, 0x00, 0x0e, 0x00, 0xff, 0x00, 0x04, 0x00,
1975 0x70, 0x00, 0x0f, 0x00, 0x0e),
1976 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
1977 },
1978 { .freq = 2427,
1979 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
1980 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
1981 0x00, 0x00, 0xfd, 0x00, 0x04, 0x00, 0x70, 0x00,
1982 0x0f, 0x00, 0x0e, 0x00, 0xfd, 0x00, 0x04, 0x00,
1983 0x70, 0x00, 0x0f, 0x00, 0x0e),
1984 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
1985 },
1986 { .freq = 2432,
1987 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
1988 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
1989 0x00, 0x00, 0xfb, 0x00, 0x04, 0x00, 0x70, 0x00,
1990 0x0f, 0x00, 0x0e, 0x00, 0xfb, 0x00, 0x04, 0x00,
1991 0x70, 0x00, 0x0f, 0x00, 0x0e),
1992 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
1993 },
1994 { .freq = 2437,
1995 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
1996 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
1997 0x00, 0x00, 0xfa, 0x00, 0x04, 0x00, 0x70, 0x00,
1998 0x0f, 0x00, 0x0e, 0x00, 0xfa, 0x00, 0x04, 0x00,
1999 0x70, 0x00, 0x0f, 0x00, 0x0e),
2000 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
2001 },
2002 { .freq = 2442,
2003 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
2004 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
2005 0x00, 0x00, 0xf8, 0x00, 0x04, 0x00, 0x70, 0x00,
2006 0x0f, 0x00, 0x0e, 0x00, 0xf8, 0x00, 0x04, 0x00,
2007 0x70, 0x00, 0x0f, 0x00, 0x0e),
2008 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
2009 },
2010 { .freq = 2447,
2011 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
2012 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
2013 0x00, 0x00, 0xf7, 0x00, 0x04, 0x00, 0x70, 0x00,
2014 0x0f, 0x00, 0x0e, 0x00, 0xf7, 0x00, 0x04, 0x00,
2015 0x70, 0x00, 0x0f, 0x00, 0x0e),
2016 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
2017 },
2018 { .freq = 2452,
2019 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
2020 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
2021 0x00, 0x00, 0xf6, 0x00, 0x04, 0x00, 0x70, 0x00,
2022 0x0f, 0x00, 0x0e, 0x00, 0xf6, 0x00, 0x04, 0x00,
2023 0x70, 0x00, 0x0f, 0x00, 0x0e),
2024 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
2025 },
2026 { .freq = 2457,
2027 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
2028 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
2029 0x00, 0x00, 0xf5, 0x00, 0x04, 0x00, 0x70, 0x00,
2030 0x0f, 0x00, 0x0e, 0x00, 0xf5, 0x00, 0x04, 0x00,
2031 0x70, 0x00, 0x0f, 0x00, 0x0e),
2032 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
2033 },
2034 { .freq = 2462,
2035 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
2036 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
2037 0x00, 0x00, 0xf4, 0x00, 0x04, 0x00, 0x70, 0x00,
2038 0x0f, 0x00, 0x0e, 0x00, 0xf4, 0x00, 0x04, 0x00,
2039 0x70, 0x00, 0x0f, 0x00, 0x0e),
2040 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
2041 },
2042 { .freq = 2467,
2043 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
2044 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
2045 0x00, 0x00, 0xf3, 0x00, 0x04, 0x00, 0x70, 0x00,
2046 0x0f, 0x00, 0x0e, 0x00, 0xf3, 0x00, 0x04, 0x00,
2047 0x70, 0x00, 0x0f, 0x00, 0x0e),
2048 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
2049 },
2050 { .freq = 2472,
2051 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
2052 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
2053 0x00, 0x00, 0xf2, 0x00, 0x04, 0x00, 0x70, 0x00,
2054 0x0f, 0x00, 0x0e, 0x00, 0xf2, 0x00, 0x04, 0x00,
2055 0x70, 0x00, 0x0f, 0x00, 0x0e),
2056 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
2057 },
2058 { .freq = 2484,
2059 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
2060 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
2061 0x00, 0x00, 0xf0, 0x00, 0x04, 0x00, 0x70, 0x00,
2062 0x0f, 0x00, 0x0e, 0x00, 0xf0, 0x00, 0x04, 0x00,
2063 0x70, 0x00, 0x0f, 0x00, 0x0e),
2064 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
2065 },
2066};
2067
2068static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] = {
2069 { .freq = 4920,
2070 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
2071 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
2072 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0f,
2073 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
2074 0x00, 0x0f, 0x00, 0x6f, 0x00),
2075 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
2076 },
2077 { .freq = 4930,
2078 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
2079 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
2080 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
2081 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
2082 0x00, 0x0e, 0x00, 0x6f, 0x00),
2083 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
2084 },
2085 { .freq = 4940,
2086 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
2087 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
2088 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
2089 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
2090 0x00, 0x0e, 0x00, 0x6f, 0x00),
2091 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
2092 },
2093 { .freq = 4950,
2094 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
2095 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
2096 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
2097 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
2098 0x00, 0x0e, 0x00, 0x6f, 0x00),
2099 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
2100 },
2101 { .freq = 4960,
2102 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
2103 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2104 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0e,
2105 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
2106 0x00, 0x0e, 0x00, 0x6f, 0x00),
2107 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
2108 },
2109 { .freq = 4970,
2110 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
2111 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2112 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
2113 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
2114 0x00, 0x0d, 0x00, 0x6f, 0x00),
2115 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
2116 },
2117 { .freq = 4980,
2118 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
2119 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2120 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
2121 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
2122 0x00, 0x0d, 0x00, 0x6f, 0x00),
2123 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
2124 },
2125 { .freq = 4990,
2126 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
2127 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2128 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
2129 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
2130 0x00, 0x0d, 0x00, 0x6f, 0x00),
2131 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
2132 },
2133 { .freq = 5000,
2134 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
2135 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2136 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
2137 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
2138 0x00, 0x0d, 0x00, 0x6f, 0x00),
2139 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
2140 },
2141 { .freq = 5010,
2142 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
2143 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2144 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
2145 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
2146 0x00, 0x0d, 0x00, 0x6f, 0x00),
2147 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
2148 },
2149 { .freq = 5020,
2150 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
2151 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2152 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0d,
2153 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
2154 0x00, 0x0d, 0x00, 0x6f, 0x00),
2155 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
2156 },
2157 { .freq = 5030,
2158 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
2159 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2160 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
2161 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
2162 0x00, 0x0c, 0x00, 0x6f, 0x00),
2163 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
2164 },
2165 { .freq = 5040,
2166 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
2167 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2168 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
2169 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
2170 0x00, 0x0c, 0x00, 0x6f, 0x00),
2171 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
2172 },
2173 { .freq = 5050,
2174 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
2175 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2176 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
2177 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
2178 0x00, 0x0c, 0x00, 0x6f, 0x00),
2179 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
2180 },
2181 { .freq = 5060,
2182 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
2183 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2184 0xff, 0xfd, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
2185 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x70,
2186 0x00, 0x0c, 0x00, 0x6f, 0x00),
2187 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
2188 },
2189 { .freq = 5070,
2190 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
2191 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2192 0xff, 0xfd, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
2193 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x70,
2194 0x00, 0x0b, 0x00, 0x6f, 0x00),
2195 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
2196 },
2197 { .freq = 5080,
2198 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
2199 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2200 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
2201 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
2202 0x00, 0x0b, 0x00, 0x6f, 0x00),
2203 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
2204 },
2205 { .freq = 5090,
2206 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
2207 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
2208 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
2209 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
2210 0x00, 0x0b, 0x00, 0x6f, 0x00),
2211 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
2212 },
2213 { .freq = 5100,
2214 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
2215 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2216 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
2217 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
2218 0x00, 0x0b, 0x00, 0x6f, 0x00),
2219 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
2220 },
2221 { .freq = 5110,
2222 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
2223 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2224 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
2225 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
2226 0x00, 0x0b, 0x00, 0x6f, 0x00),
2227 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
2228 },
2229 { .freq = 5120,
2230 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
2231 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2232 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
2233 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
2234 0x00, 0x0b, 0x00, 0x6f, 0x00),
2235 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
2236 },
2237 { .freq = 5130,
2238 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
2239 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2240 0xff, 0xfb, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0a,
2241 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x70,
2242 0x00, 0x0a, 0x00, 0x6f, 0x00),
2243 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
2244 },
2245 { .freq = 5140,
2246 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
2247 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2248 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x0a,
2249 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
2250 0x00, 0x0a, 0x00, 0x6f, 0x00),
2251 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
2252 },
2253 { .freq = 5160,
2254 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
2255 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2256 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x09,
2257 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
2258 0x00, 0x09, 0x00, 0x6e, 0x00),
2259 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
2260 },
2261 { .freq = 5170,
2262 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
2263 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2264 0xff, 0xfb, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
2265 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x06, 0x00, 0x70,
2266 0x00, 0x09, 0x00, 0x6e, 0x00),
2267 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
2268 },
2269 { .freq = 5180,
2270 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
2271 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2272 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
2273 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
2274 0x00, 0x09, 0x00, 0x6e, 0x00),
2275 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
2276 },
2277 { .freq = 5190,
2278 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
2279 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2280 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
2281 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
2282 0x00, 0x09, 0x00, 0x6e, 0x00),
2283 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
2284 },
2285 { .freq = 5200,
2286 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
2287 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2288 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
2289 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
2290 0x00, 0x09, 0x00, 0x6e, 0x00),
2291 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
2292 },
2293 { .freq = 5210,
2294 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
2295 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2296 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
2297 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
2298 0x00, 0x09, 0x00, 0x6e, 0x00),
2299 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
2300 },
2301 { .freq = 5220,
2302 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
2303 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2304 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
2305 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
2306 0x00, 0x09, 0x00, 0x6e, 0x00),
2307 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
2308 },
2309 { .freq = 5230,
2310 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
2311 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2312 0xff, 0xea, 0x00, 0x06, 0x00, 0x70, 0x00, 0x08,
2313 0x00, 0x9e, 0x00, 0xea, 0x00, 0x06, 0x00, 0x70,
2314 0x00, 0x08, 0x00, 0x6e, 0x00),
2315 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
2316 },
2317 { .freq = 5240,
2318 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
2319 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2320 0xff, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
2321 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
2322 0x00, 0x08, 0x00, 0x6d, 0x00),
2323 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
2324 },
2325 { .freq = 5250,
2326 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
2327 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2328 0xff, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
2329 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
2330 0x00, 0x08, 0x00, 0x6d, 0x00),
2331 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
2332 },
2333 { .freq = 5260,
2334 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
2335 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
2336 0xff, 0xd9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
2337 0x00, 0x9d, 0x00, 0xd9, 0x00, 0x05, 0x00, 0x70,
2338 0x00, 0x08, 0x00, 0x6d, 0x00),
2339 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
2340 },
2341 { .freq = 5270,
2342 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
2343 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
2344 0xff, 0xd8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
2345 0x00, 0x9c, 0x00, 0xd8, 0x00, 0x04, 0x00, 0x70,
2346 0x00, 0x07, 0x00, 0x6c, 0x00),
2347 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
2348 },
2349 { .freq = 5280,
2350 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
2351 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
2352 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
2353 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
2354 0x00, 0x07, 0x00, 0x6c, 0x00),
2355 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
2356 },
2357 { .freq = 5290,
2358 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
2359 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
2360 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
2361 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
2362 0x00, 0x07, 0x00, 0x6c, 0x00),
2363 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
2364 },
2365 { .freq = 5300,
2366 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
2367 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
2368 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
2369 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
2370 0x00, 0x07, 0x00, 0x6c, 0x00),
2371 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
2372 },
2373 { .freq = 5310,
2374 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
2375 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
2376 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
2377 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
2378 0x00, 0x07, 0x00, 0x6c, 0x00),
2379 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
2380 },
2381 { .freq = 5320,
2382 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
2383 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
2384 0xff, 0xb8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
2385 0x00, 0x9c, 0x00, 0xb8, 0x00, 0x04, 0x00, 0x70,
2386 0x00, 0x07, 0x00, 0x6c, 0x00),
2387 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
2388 },
2389 { .freq = 5330,
2390 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
2391 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
2392 0xff, 0xb7, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
2393 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x04, 0x00, 0x70,
2394 0x00, 0x07, 0x00, 0x6b, 0x00),
2395 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
2396 },
2397 { .freq = 5340,
2398 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
2399 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
2400 0xff, 0xb7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x07,
2401 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x70,
2402 0x00, 0x07, 0x00, 0x6b, 0x00),
2403 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
2404 },
2405 { .freq = 5350,
2406 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
2407 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
2408 0xff, 0xa7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
2409 0x00, 0x9b, 0x00, 0xa7, 0x00, 0x03, 0x00, 0x70,
2410 0x00, 0x06, 0x00, 0x6b, 0x00),
2411 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
2412 },
2413 { .freq = 5360,
2414 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
2415 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
2416 0xff, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
2417 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
2418 0x00, 0x06, 0x00, 0x6b, 0x00),
2419 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
2420 },
2421 { .freq = 5370,
2422 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
2423 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
2424 0xff, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
2425 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
2426 0x00, 0x06, 0x00, 0x5b, 0x00),
2427 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
2428 },
2429 { .freq = 5380,
2430 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
2431 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
2432 0xff, 0x96, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
2433 0x00, 0x9a, 0x00, 0x96, 0x00, 0x03, 0x00, 0x70,
2434 0x00, 0x06, 0x00, 0x5a, 0x00),
2435 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
2436 },
2437 { .freq = 5390,
2438 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
2439 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
2440 0xff, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
2441 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
2442 0x00, 0x06, 0x00, 0x5a, 0x00),
2443 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
2444 },
2445 { .freq = 5400,
2446 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
2447 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
2448 0xc8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
2449 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
2450 0x00, 0x06, 0x00, 0x5a, 0x00),
2451 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
2452 },
2453 { .freq = 5410,
2454 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
2455 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
2456 0xc8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
2457 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
2458 0x00, 0x05, 0x00, 0x5a, 0x00),
2459 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
2460 },
2461 { .freq = 5420,
2462 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
2463 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
2464 0xc8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
2465 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
2466 0x00, 0x05, 0x00, 0x5a, 0x00),
2467 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
2468 },
2469 { .freq = 5430,
2470 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
2471 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
2472 0xc8, 0x85, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
2473 0x00, 0x99, 0x00, 0x85, 0x00, 0x02, 0x00, 0x70,
2474 0x00, 0x05, 0x00, 0x59, 0x00),
2475 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
2476 },
2477 { .freq = 5440,
2478 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
2479 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
2480 0xc8, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
2481 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
2482 0x00, 0x05, 0x00, 0x59, 0x00),
2483 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
2484 },
2485 { .freq = 5450,
2486 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
2487 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
2488 0xc8, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
2489 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
2490 0x00, 0x05, 0x00, 0x59, 0x00),
2491 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
2492 },
2493 { .freq = 5460,
2494 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
2495 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
2496 0xc8, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x04,
2497 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
2498 0x00, 0x04, 0x00, 0x69, 0x00),
2499 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
2500 },
2501 { .freq = 5470,
2502 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
2503 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
2504 0xc8, 0x74, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
2505 0x00, 0x99, 0x00, 0x74, 0x00, 0x01, 0x00, 0x70,
2506 0x00, 0x04, 0x00, 0x69, 0x00),
2507 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
2508 },
2509 { .freq = 5480,
2510 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
2511 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
2512 0xc8, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
2513 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
2514 0x00, 0x04, 0x00, 0x68, 0x00),
2515 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
2516 },
2517 { .freq = 5490,
2518 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
2519 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
2520 0xc8, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
2521 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
2522 0x00, 0x04, 0x00, 0x68, 0x00),
2523 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
2524 },
2525 { .freq = 5500,
2526 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
2527 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
2528 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
2529 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
2530 0x00, 0x04, 0x00, 0x78, 0x00),
2531 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
2532 },
2533 { .freq = 5510,
2534 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
2535 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
2536 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
2537 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
2538 0x00, 0x04, 0x00, 0x78, 0x00),
2539 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
2540 },
2541 { .freq = 5520,
2542 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
2543 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
2544 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
2545 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
2546 0x00, 0x04, 0x00, 0x78, 0x00),
2547 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
2548 },
2549 { .freq = 5530,
2550 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
2551 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
2552 0x84, 0x63, 0x00, 0x01, 0x00, 0x70, 0x00, 0x03,
2553 0x00, 0x98, 0x00, 0x63, 0x00, 0x01, 0x00, 0x70,
2554 0x00, 0x03, 0x00, 0x78, 0x00),
2555 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
2556 },
2557 { .freq = 5540,
2558 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
2559 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
2560 0x84, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
2561 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
2562 0x00, 0x03, 0x00, 0x77, 0x00),
2563 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
2564 },
2565 { .freq = 5550,
2566 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
2567 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
2568 0x84, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
2569 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
2570 0x00, 0x03, 0x00, 0x77, 0x00),
2571 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
2572 },
2573 { .freq = 5560,
2574 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
2575 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
2576 0x84, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
2577 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
2578 0x00, 0x03, 0x00, 0x77, 0x00),
2579 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
2580 },
2581 { .freq = 5570,
2582 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
2583 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
2584 0x84, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
2585 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
2586 0x00, 0x02, 0x00, 0x76, 0x00),
2587 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
2588 },
2589 { .freq = 5580,
2590 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
2591 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
2592 0x84, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
2593 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
2594 0x00, 0x02, 0x00, 0x76, 0x00),
2595 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
2596 },
2597 { .freq = 5590,
2598 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
2599 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
2600 0x84, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
2601 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
2602 0x00, 0x02, 0x00, 0x76, 0x00),
2603 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
2604 },
2605 { .freq = 5600,
2606 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
2607 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
2608 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
2609 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
2610 0x00, 0x02, 0x00, 0x76, 0x00),
2611 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
2612 },
2613 { .freq = 5610,
2614 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
2615 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
2616 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
2617 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
2618 0x00, 0x02, 0x00, 0x76, 0x00),
2619 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
2620 },
2621 { .freq = 5620,
2622 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
2623 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
2624 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
2625 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
2626 0x00, 0x02, 0x00, 0x76, 0x00),
2627 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
2628 },
2629 { .freq = 5630,
2630 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
2631 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
2632 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
2633 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
2634 0x00, 0x02, 0x00, 0x76, 0x00),
2635 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
2636 },
2637 { .freq = 5640,
2638 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
2639 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
2640 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
2641 0x00, 0x95, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
2642 0x00, 0x02, 0x00, 0x75, 0x00),
2643 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
2644 },
2645 { .freq = 5650,
2646 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
2647 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
2648 0x70, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
2649 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
2650 0x00, 0x01, 0x00, 0x75, 0x00),
2651 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
2652 },
2653 { .freq = 5660,
2654 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
2655 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
2656 0x70, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
2657 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
2658 0x00, 0x01, 0x00, 0x75, 0x00),
2659 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
2660 },
2661 { .freq = 5670,
2662 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
2663 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
2664 0x70, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
2665 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
2666 0x00, 0x01, 0x00, 0x74, 0x00),
2667 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
2668 },
2669 { .freq = 5680,
2670 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
2671 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
2672 0x70, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
2673 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
2674 0x00, 0x01, 0x00, 0x74, 0x00),
2675 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
2676 },
2677 { .freq = 5690,
2678 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
2679 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
2680 0x70, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
2681 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
2682 0x00, 0x01, 0x00, 0x74, 0x00),
2683 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
2684 },
2685 { .freq = 5700,
2686 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
2687 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2688 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
2689 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
2690 0x00, 0x01, 0x00, 0x74, 0x00),
2691 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
2692 },
2693 { .freq = 5710,
2694 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
2695 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2696 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
2697 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
2698 0x00, 0x01, 0x00, 0x74, 0x00),
2699 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
2700 },
2701 { .freq = 5720,
2702 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
2703 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2704 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
2705 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
2706 0x00, 0x01, 0x00, 0x74, 0x00),
2707 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
2708 },
2709 { .freq = 5725,
2710 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
2711 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2712 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
2713 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
2714 0x00, 0x01, 0x00, 0x74, 0x00),
2715 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
2716 },
2717 { .freq = 5730,
2718 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
2719 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2720 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
2721 0x00, 0x94, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
2722 0x00, 0x01, 0x00, 0x84, 0x00),
2723 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
2724 },
2725 { .freq = 5735,
2726 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
2727 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2728 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2729 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
2730 0x00, 0x00, 0x00, 0x83, 0x00),
2731 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
2732 },
2733 { .freq = 5740,
2734 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
2735 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2736 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2737 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
2738 0x00, 0x00, 0x00, 0x83, 0x00),
2739 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
2740 },
2741 { .freq = 5745,
2742 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
2743 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2744 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2745 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
2746 0x00, 0x00, 0x00, 0x83, 0x00),
2747 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
2748 },
2749 { .freq = 5750,
2750 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
2751 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2752 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2753 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
2754 0x00, 0x00, 0x00, 0x83, 0x00),
2755 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
2756 },
2757 { .freq = 5755,
2758 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
2759 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2760 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2761 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
2762 0x00, 0x00, 0x00, 0x83, 0x00),
2763 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
2764 },
2765 { .freq = 5760,
2766 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
2767 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2768 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2769 0x00, 0x93, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
2770 0x00, 0x00, 0x00, 0x83, 0x00),
2771 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
2772 },
2773 { .freq = 5765,
2774 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
2775 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2776 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2777 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
2778 0x00, 0x00, 0x00, 0x82, 0x00),
2779 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
2780 },
2781 { .freq = 5770,
2782 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
2783 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2784 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2785 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
2786 0x00, 0x00, 0x00, 0x82, 0x00),
2787 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
2788 },
2789 { .freq = 5775,
2790 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
2791 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2792 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2793 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
2794 0x00, 0x00, 0x00, 0x82, 0x00),
2795 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
2796 },
2797 { .freq = 5780,
2798 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
2799 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
2800 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2801 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2802 0x00, 0x00, 0x00, 0x82, 0x00),
2803 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
2804 },
2805 { .freq = 5785,
2806 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
2807 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
2808 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2809 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2810 0x00, 0x00, 0x00, 0x82, 0x00),
2811 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
2812 },
2813 { .freq = 5790,
2814 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
2815 0x0c, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
2816 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2817 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2818 0x00, 0x00, 0x00, 0x82, 0x00),
2819 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
2820 },
2821 { .freq = 5795,
2822 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
2823 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
2824 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2825 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2826 0x00, 0x00, 0x00, 0x82, 0x00),
2827 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
2828 },
2829 { .freq = 5800,
2830 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
2831 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2832 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2833 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2834 0x00, 0x00, 0x00, 0x82, 0x00),
2835 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
2836 },
2837 { .freq = 5805,
2838 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
2839 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2840 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2841 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2842 0x00, 0x00, 0x00, 0x82, 0x00),
2843 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
2844 },
2845 { .freq = 5810,
2846 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
2847 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2848 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2849 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2850 0x00, 0x00, 0x00, 0x82, 0x00),
2851 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
2852 },
2853 { .freq = 5815,
2854 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
2855 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2856 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2857 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2858 0x00, 0x00, 0x00, 0x82, 0x00),
2859 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
2860 },
2861 { .freq = 5820,
2862 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
2863 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2864 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2865 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2866 0x00, 0x00, 0x00, 0x82, 0x00),
2867 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
2868 },
2869 { .freq = 5825,
2870 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
2871 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2872 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2873 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2874 0x00, 0x00, 0x00, 0x82, 0x00),
2875 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
2876 },
2877 { .freq = 5830,
2878 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
2879 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2880 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2881 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2882 0x00, 0x00, 0x00, 0x72, 0x00),
2883 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
2884 },
2885 { .freq = 5840,
2886 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
2887 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2888 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2889 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
2890 0x00, 0x00, 0x00, 0x72, 0x00),
2891 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
2892 },
2893 { .freq = 5850,
2894 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
2895 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2896 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2897 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
2898 0x00, 0x00, 0x00, 0x72, 0x00),
2899 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
2900 },
2901 { .freq = 5860,
2902 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
2903 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2904 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2905 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
2906 0x00, 0x00, 0x00, 0x72, 0x00),
2907 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
2908 },
2909 { .freq = 5870,
2910 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
2911 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2912 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2913 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
2914 0x00, 0x00, 0x00, 0x71, 0x00),
2915 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
2916 },
2917 { .freq = 5880,
2918 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
2919 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2920 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2921 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
2922 0x00, 0x00, 0x00, 0x71, 0x00),
2923 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
2924 },
2925 { .freq = 5890,
2926 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
2927 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
2928 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2929 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
2930 0x00, 0x00, 0x00, 0x71, 0x00),
2931 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
2932 },
2933 { .freq = 5900,
2934 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
2935 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
2936 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2937 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
2938 0x00, 0x00, 0x00, 0x71, 0x00),
2939 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
2940 },
2941 { .freq = 5910,
2942 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
2943 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
2944 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
2945 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
2946 0x00, 0x00, 0x00, 0x71, 0x00),
2947 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
2948 },
2949 { .freq = 2412,
2950 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
2951 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
2952 0x00, 0x00, 0x1f, 0x00, 0x03, 0x00, 0x70, 0x00,
2953 0x0f, 0x00, 0x0b, 0x00, 0x1f, 0x00, 0x03, 0x00,
2954 0x70, 0x00, 0x0f, 0x00, 0x0b),
2955 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
2956 },
2957 { .freq = 2417,
2958 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
2959 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
2960 0x00, 0x00, 0x1f, 0x00, 0x03, 0x00, 0x70, 0x00,
2961 0x0f, 0x00, 0x0a, 0x00, 0x1f, 0x00, 0x03, 0x00,
2962 0x70, 0x00, 0x0f, 0x00, 0x0a),
2963 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
2964 },
2965 { .freq = 2422,
2966 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
2967 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
2968 0x00, 0x00, 0x0e, 0x00, 0x03, 0x00, 0x70, 0x00,
2969 0x0f, 0x00, 0x0a, 0x00, 0x0e, 0x00, 0x03, 0x00,
2970 0x70, 0x00, 0x0f, 0x00, 0x0a),
2971 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
2972 },
2973 { .freq = 2427,
2974 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
2975 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
2976 0x00, 0x00, 0x0d, 0x00, 0x03, 0x00, 0x70, 0x00,
2977 0x0e, 0x00, 0x0a, 0x00, 0x0d, 0x00, 0x03, 0x00,
2978 0x70, 0x00, 0x0e, 0x00, 0x0a),
2979 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
2980 },
2981 { .freq = 2432,
2982 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
2983 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
2984 0x00, 0x00, 0x0c, 0x00, 0x03, 0x00, 0x70, 0x00,
2985 0x0e, 0x00, 0x0a, 0x00, 0x0c, 0x00, 0x03, 0x00,
2986 0x70, 0x00, 0x0e, 0x00, 0x0a),
2987 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
2988 },
2989 { .freq = 2437,
2990 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
2991 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
2992 0x00, 0x00, 0x0b, 0x00, 0x03, 0x00, 0x70, 0x00,
2993 0x0e, 0x00, 0x0a, 0x00, 0x0b, 0x00, 0x03, 0x00,
2994 0x70, 0x00, 0x0e, 0x00, 0x0a),
2995 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
2996 },
2997 { .freq = 2442,
2998 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
2999 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3000 0x00, 0x00, 0x09, 0x00, 0x03, 0x00, 0x70, 0x00,
3001 0x0e, 0x00, 0x0a, 0x00, 0x09, 0x00, 0x03, 0x00,
3002 0x70, 0x00, 0x0e, 0x00, 0x0a),
3003 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
3004 },
3005 { .freq = 2447,
3006 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
3007 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
3008 0x00, 0x00, 0x08, 0x00, 0x02, 0x00, 0x70, 0x00,
3009 0x0e, 0x00, 0x09, 0x00, 0x08, 0x00, 0x02, 0x00,
3010 0x70, 0x00, 0x0e, 0x00, 0x09),
3011 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
3012 },
3013 { .freq = 2452,
3014 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
3015 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
3016 0x00, 0x00, 0x07, 0x00, 0x02, 0x00, 0x70, 0x00,
3017 0x0e, 0x00, 0x09, 0x00, 0x07, 0x00, 0x02, 0x00,
3018 0x70, 0x00, 0x0e, 0x00, 0x09),
3019 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
3020 },
3021 { .freq = 2457,
3022 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
3023 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
3024 0x00, 0x00, 0x06, 0x00, 0x02, 0x00, 0x70, 0x00,
3025 0x0d, 0x00, 0x09, 0x00, 0x06, 0x00, 0x02, 0x00,
3026 0x70, 0x00, 0x0d, 0x00, 0x09),
3027 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
3028 },
3029 { .freq = 2462,
3030 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
3031 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
3032 0x00, 0x00, 0x05, 0x00, 0x02, 0x00, 0x70, 0x00,
3033 0x0d, 0x00, 0x09, 0x00, 0x05, 0x00, 0x02, 0x00,
3034 0x70, 0x00, 0x0d, 0x00, 0x09),
3035 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
3036 },
3037 { .freq = 2467,
3038 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
3039 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
3040 0x00, 0x00, 0x04, 0x00, 0x02, 0x00, 0x70, 0x00,
3041 0x0d, 0x00, 0x08, 0x00, 0x04, 0x00, 0x02, 0x00,
3042 0x70, 0x00, 0x0d, 0x00, 0x08),
3043 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
3044 },
3045 { .freq = 2472,
3046 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
3047 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
3048 0x00, 0x00, 0x03, 0x00, 0x02, 0x00, 0x70, 0x00,
3049 0x0d, 0x00, 0x08, 0x00, 0x03, 0x00, 0x02, 0x00,
3050 0x70, 0x00, 0x0d, 0x00, 0x08),
3051 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
3052 },
3053 { .freq = 2484,
3054 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
3055 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
3056 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
3057 0x0d, 0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00,
3058 0x70, 0x00, 0x0d, 0x00, 0x08),
3059 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
3060 },
3061};
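/*
 * Editor's illustrative sketch (not part of the original patch): entries in
 * the b43_nphy_channeltab_entry_rev3 arrays above are keyed by the channel
 * center frequency in MHz, and tuning amounts to picking the matching entry
 * and programming the radio and PHY register values it carries.  The helper
 * below is a hypothetical, simplified lookup; its name and signature are
 * assumptions for illustration only, and the real driver keeps its own
 * table-lookup code elsewhere.
 */
static const struct b43_nphy_channeltab_entry_rev3 *
example_nphy_find_chantab_entry(const struct b43_nphy_channeltab_entry_rev3 *tab,
				size_t len, u16 freq)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (tab[i].freq == freq)
			return &tab[i];	/* e.g. 5180 MHz -> channel 36 entry */
	}
	return NULL;	/* frequency not covered by this radio revision */
}

/*
 * Hypothetical use against the rev5 table defined above:
 *
 *	const struct b43_nphy_channeltab_entry_rev3 *e =
 *		example_nphy_find_chantab_entry(b43_nphy_channeltab_rev5,
 *						ARRAY_SIZE(b43_nphy_channeltab_rev5),
 *						5180);
 */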
3062
3063static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] = {
3064 { .freq = 4920,
3065 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
3066 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
3067 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3068 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3069 0x00, 0x0f, 0x00, 0x6f, 0x00),
3070 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
3071 },
3072 { .freq = 4930,
3073 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
3074 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
3075 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3076 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3077 0x00, 0x0f, 0x00, 0x6f, 0x00),
3078 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
3079 },
3080 { .freq = 4940,
3081 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
3082 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
3083 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3084 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3085 0x00, 0x0f, 0x00, 0x6f, 0x00),
3086 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
3087 },
3088 { .freq = 4950,
3089 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
3090 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
3091 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3092 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3093 0x00, 0x0f, 0x00, 0x6f, 0x00),
3094 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
3095 },
3096 { .freq = 4960,
3097 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
3098 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3099 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3100 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3101 0x00, 0x0f, 0x00, 0x6f, 0x00),
3102 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
3103 },
3104 { .freq = 4970,
3105 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
3106 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3107 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3108 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3109 0x00, 0x0f, 0x00, 0x6f, 0x00),
3110 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
3111 },
3112 { .freq = 4980,
3113 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
3114 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3115 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3116 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3117 0x00, 0x0f, 0x00, 0x6f, 0x00),
3118 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
3119 },
3120 { .freq = 4990,
3121 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
3122 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3123 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3124 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3125 0x00, 0x0f, 0x00, 0x6f, 0x00),
3126 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
3127 },
3128 { .freq = 5000,
3129 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
3130 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3131 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3132 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3133 0x00, 0x0f, 0x00, 0x6f, 0x00),
3134 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
3135 },
3136 { .freq = 5010,
3137 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
3138 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3139 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3140 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3141 0x00, 0x0f, 0x00, 0x6f, 0x00),
3142 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
3143 },
3144 { .freq = 5020,
3145 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
3146 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3147 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3148 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3149 0x00, 0x0f, 0x00, 0x6f, 0x00),
3150 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
3151 },
3152 { .freq = 5030,
3153 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
3154 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3155 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3156 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3157 0x00, 0x0f, 0x00, 0x6f, 0x00),
3158 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
3159 },
3160 { .freq = 5040,
3161 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
3162 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3163 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3164 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3165 0x00, 0x0f, 0x00, 0x6f, 0x00),
3166 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
3167 },
3168 { .freq = 5050,
3169 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
3170 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3171 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3172 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3173 0x00, 0x0f, 0x00, 0x6f, 0x00),
3174 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
3175 },
3176 { .freq = 5060,
3177 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
3178 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3179 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3180 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
3181 0x00, 0x0f, 0x00, 0x6f, 0x00),
3182 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
3183 },
3184 { .freq = 5070,
3185 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
3186 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3187 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3188 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
3189 0x00, 0x0f, 0x00, 0x6f, 0x00),
3190 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
3191 },
3192 { .freq = 5080,
3193 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
3194 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3195 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3196 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
3197 0x00, 0x0f, 0x00, 0x6f, 0x00),
3198 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
3199 },
3200 { .freq = 5090,
3201 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
3202 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3203 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
3204 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
3205 0x00, 0x0f, 0x00, 0x6f, 0x00),
3206 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
3207 },
3208 { .freq = 5100,
3209 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
3210 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3211 0xff, 0xfd, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
3212 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x77,
3213 0x00, 0x0f, 0x00, 0x6f, 0x00),
3214 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
3215 },
3216 { .freq = 5110,
3217 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
3218 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3219 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
3220 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
3221 0x00, 0x0f, 0x00, 0x6f, 0x00),
3222 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
3223 },
3224 { .freq = 5120,
3225 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
3226 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3227 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
3228 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
3229 0x00, 0x0f, 0x00, 0x6f, 0x00),
3230 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
3231 },
3232 { .freq = 5130,
3233 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
3234 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3235 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
3236 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
3237 0x00, 0x0f, 0x00, 0x6f, 0x00),
3238 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
3239 },
3240 { .freq = 5140,
3241 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
3242 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3243 0xff, 0xfb, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
3244 0x00, 0x6f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x77,
3245 0x00, 0x0f, 0x00, 0x6f, 0x00),
3246 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
3247 },
3248 { .freq = 5160,
3249 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
3250 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3251 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
3252 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
3253 0x00, 0x0e, 0x00, 0x6f, 0x00),
3254 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
3255 },
3256 { .freq = 5170,
3257 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
3258 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3259 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
3260 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
3261 0x00, 0x0e, 0x00, 0x6f, 0x00),
3262 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
3263 },
3264 { .freq = 5180,
3265 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
3266 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3267 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
3268 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
3269 0x00, 0x0e, 0x00, 0x6f, 0x00),
3270 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
3271 },
3272 { .freq = 5190,
3273 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
3274 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3275 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0d,
3276 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
3277 0x00, 0x0d, 0x00, 0x6f, 0x00),
3278 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
3279 },
3280 { .freq = 5200,
3281 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
3282 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3283 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
3284 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
3285 0x00, 0x0d, 0x00, 0x6f, 0x00),
3286 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
3287 },
3288 { .freq = 5210,
3289 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
3290 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3291 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
3292 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
3293 0x00, 0x0d, 0x00, 0x6f, 0x00),
3294 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
3295 },
3296 { .freq = 5220,
3297 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
3298 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
3299 0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
3300 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
3301 0x00, 0x0d, 0x00, 0x6f, 0x00),
3302 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
3303 },
3304 { .freq = 5230,
3305 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
3306 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
3307 0xee, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
3308 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
3309 0x00, 0x0d, 0x00, 0x6f, 0x00),
3310 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
3311 },
3312 { .freq = 5240,
3313 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
3314 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
3315 0xee, 0xc8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
3316 0x00, 0x6f, 0x00, 0xc8, 0x00, 0x05, 0x00, 0x77,
3317 0x00, 0x0d, 0x00, 0x6f, 0x00),
3318 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
3319 },
3320 { .freq = 5250,
3321 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
3322 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
3323 0xed, 0xc7, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
3324 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x77,
3325 0x00, 0x0d, 0x00, 0x6f, 0x00),
3326 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
3327 },
3328 { .freq = 5260,
3329 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
3330 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
3331 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0d,
3332 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
3333 0x00, 0x0d, 0x00, 0x6f, 0x00),
3334 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
3335 },
3336 { .freq = 5270,
3337 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
3338 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
3339 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0c,
3340 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
3341 0x00, 0x0c, 0x00, 0x6f, 0x00),
3342 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
3343 },
3344 { .freq = 5280,
3345 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
3346 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
3347 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
3348 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
3349 0x00, 0x0c, 0x00, 0x6f, 0x00),
3350 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
3351 },
3352 { .freq = 5290,
3353 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
3354 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
3355 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
3356 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
3357 0x00, 0x0c, 0x00, 0x6f, 0x00),
3358 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
3359 },
3360 { .freq = 5300,
3361 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
3362 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
3363 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
3364 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
3365 0x00, 0x0c, 0x00, 0x6f, 0x00),
3366 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
3367 },
3368 { .freq = 5310,
3369 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
3370 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
3371 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
3372 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
3373 0x00, 0x0c, 0x00, 0x6f, 0x00),
3374 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
3375 },
3376 { .freq = 5320,
3377 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
3378 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
3379 0xdb, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
3380 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
3381 0x00, 0x0c, 0x00, 0x6f, 0x00),
3382 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
3383 },
3384 { .freq = 5330,
3385 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
3386 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
3387 0xcb, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
3388 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
3389 0x00, 0x0b, 0x00, 0x6f, 0x00),
3390 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
3391 },
3392 { .freq = 5340,
3393 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
3394 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
3395 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
3396 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
3397 0x00, 0x0b, 0x00, 0x6f, 0x00),
3398 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
3399 },
3400 { .freq = 5350,
3401 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
3402 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
3403 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
3404 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
3405 0x00, 0x0b, 0x00, 0x6f, 0x00),
3406 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
3407 },
3408 { .freq = 5360,
3409 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
3410 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
3411 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
3412 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
3413 0x00, 0x0a, 0x00, 0x6f, 0x00),
3414 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
3415 },
3416 { .freq = 5370,
3417 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
3418 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
3419 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
3420 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
3421 0x00, 0x0a, 0x00, 0x6f, 0x00),
3422 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
3423 },
3424 { .freq = 5380,
3425 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
3426 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
3427 0xb8, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
3428 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
3429 0x00, 0x0a, 0x00, 0x6f, 0x00),
3430 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
3431 },
3432 { .freq = 5390,
3433 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
3434 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
3435 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
3436 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
3437 0x00, 0x0a, 0x00, 0x6f, 0x00),
3438 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
3439 },
3440 { .freq = 5400,
3441 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
3442 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
3443 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
3444 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
3445 0x00, 0x0a, 0x00, 0x6f, 0x00),
3446 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
3447 },
3448 { .freq = 5410,
3449 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
3450 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
3451 0xb7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
3452 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
3453 0x00, 0x0a, 0x00, 0x6f, 0x00),
3454 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
3455 },
3456 { .freq = 5420,
3457 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
3458 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
3459 0xa7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
3460 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
3461 0x00, 0x0a, 0x00, 0x6f, 0x00),
3462 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
3463 },
3464 { .freq = 5430,
3465 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
3466 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
3467 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
3468 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
3469 0x00, 0x0a, 0x00, 0x6f, 0x00),
3470 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
3471 },
3472 { .freq = 5440,
3473 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
3474 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
3475 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x09,
3476 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
3477 0x00, 0x09, 0x00, 0x6f, 0x00),
3478 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
3479 },
3480 { .freq = 5450,
3481 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
3482 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
3483 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
3484 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
3485 0x00, 0x09, 0x00, 0x6f, 0x00),
3486 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
3487 },
3488 { .freq = 5460,
3489 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
3490 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
3491 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
3492 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
3493 0x00, 0x09, 0x00, 0x6f, 0x00),
3494 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
3495 },
3496 { .freq = 5470,
3497 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
3498 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
3499 0x94, 0x73, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
3500 0x00, 0x6f, 0x00, 0x73, 0x00, 0x01, 0x00, 0x77,
3501 0x00, 0x09, 0x00, 0x6f, 0x00),
3502 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
3503 },
3504 { .freq = 5480,
3505 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
3506 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
3507 0x84, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
3508 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
3509 0x00, 0x09, 0x00, 0x6f, 0x00),
3510 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
3511 },
3512 { .freq = 5490,
3513 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
3514 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
3515 0x83, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
3516 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
3517 0x00, 0x09, 0x00, 0x6f, 0x00),
3518 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
3519 },
3520 { .freq = 5500,
3521 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
3522 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
3523 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
3524 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
3525 0x00, 0x09, 0x00, 0x6f, 0x00),
3526 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
3527 },
3528 { .freq = 5510,
3529 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
3530 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
3531 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
3532 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
3533 0x00, 0x09, 0x00, 0x6f, 0x00),
3534 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
3535 },
3536 { .freq = 5520,
3537 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
3538 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
3539 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
3540 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
3541 0x00, 0x09, 0x00, 0x6f, 0x00),
3542 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
3543 },
3544 { .freq = 5530,
3545 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
3546 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
3547 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
3548 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
3549 0x00, 0x09, 0x00, 0x6f, 0x00),
3550 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
3551 },
3552 { .freq = 5540,
3553 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
3554 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
3555 0x71, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
3556 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
3557 0x00, 0x09, 0x00, 0x6f, 0x00),
3558 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
3559 },
3560 { .freq = 5550,
3561 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
3562 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
3563 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
3564 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
3565 0x00, 0x09, 0x00, 0x6f, 0x00),
3566 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
3567 },
3568 { .freq = 5560,
3569 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
3570 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
3571 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
3572 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
3573 0x00, 0x09, 0x00, 0x6f, 0x00),
3574 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
3575 },
3576 { .freq = 5570,
3577 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
3578 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
3579 0x61, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
3580 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
3581 0x00, 0x09, 0x00, 0x6f, 0x00),
3582 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
3583 },
3584 { .freq = 5580,
3585 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
3586 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
3587 0x60, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
3588 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
3589 0x00, 0x08, 0x00, 0x6f, 0x00),
3590 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
3591 },
3592 { .freq = 5590,
3593 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
3594 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
3595 0x50, 0x61, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
3596 0x00, 0x6f, 0x00, 0x61, 0x00, 0x00, 0x00, 0x77,
3597 0x00, 0x08, 0x00, 0x6f, 0x00),
3598 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
3599 },
3600 { .freq = 5600,
3601 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
3602 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
3603 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
3604 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
3605 0x00, 0x08, 0x00, 0x6f, 0x00),
3606 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
3607 },
3608 { .freq = 5610,
3609 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
3610 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
3611 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
3612 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
3613 0x00, 0x08, 0x00, 0x6f, 0x00),
3614 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
3615 },
3616 { .freq = 5620,
3617 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
3618 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
3619 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
3620 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
3621 0x00, 0x07, 0x00, 0x6f, 0x00),
3622 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
3623 },
3624 { .freq = 5630,
3625 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
3626 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
3627 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
3628 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
3629 0x00, 0x07, 0x00, 0x6f, 0x00),
3630 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
3631 },
3632 { .freq = 5640,
3633 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
3634 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
3635 0x40, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
3636 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
3637 0x00, 0x07, 0x00, 0x6f, 0x00),
3638 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
3639 },
3640 { .freq = 5650,
3641 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
3642 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
3643 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
3644 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
3645 0x00, 0x07, 0x00, 0x6f, 0x00),
3646 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
3647 },
3648 { .freq = 5660,
3649 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
3650 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
3651 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3652 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
3653 0x00, 0x06, 0x00, 0x6f, 0x00),
3654 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
3655 },
3656 { .freq = 5670,
3657 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
3658 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
3659 0x40, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3660 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
3661 0x00, 0x06, 0x00, 0x6f, 0x00),
3662 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
3663 },
3664 { .freq = 5680,
3665 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
3666 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
3667 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3668 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
3669 0x00, 0x06, 0x00, 0x6f, 0x00),
3670 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
3671 },
3672 { .freq = 5690,
3673 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
3674 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
3675 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3676 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
3677 0x00, 0x06, 0x00, 0x6f, 0x00),
3678 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
3679 },
3680 { .freq = 5700,
3681 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
3682 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
3683 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3684 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
3685 0x00, 0x06, 0x00, 0x6e, 0x00),
3686 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
3687 },
3688 { .freq = 5710,
3689 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
3690 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
3691 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3692 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
3693 0x00, 0x06, 0x00, 0x6e, 0x00),
3694 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
3695 },
3696 { .freq = 5720,
3697 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
3698 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
3699 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3700 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
3701 0x00, 0x06, 0x00, 0x6e, 0x00),
3702 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
3703 },
3704 { .freq = 5725,
3705 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
3706 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
3707 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3708 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
3709 0x00, 0x06, 0x00, 0x6e, 0x00),
3710 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
3711 },
3712 { .freq = 5730,
3713 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
3714 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
3715 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3716 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
3717 0x00, 0x06, 0x00, 0x6e, 0x00),
3718 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
3719 },
3720 { .freq = 5735,
3721 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
3722 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
3723 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3724 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
3725 0x00, 0x06, 0x00, 0x6d, 0x00),
3726 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
3727 },
3728 { .freq = 5740,
3729 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
3730 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
3731 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3732 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
3733 0x00, 0x06, 0x00, 0x6d, 0x00),
3734 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
3735 },
3736 { .freq = 5745,
3737 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
3738 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
3739 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
3740 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
3741 0x00, 0x06, 0x00, 0x6d, 0x00),
3742 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
3743 },
3744 { .freq = 5750,
3745 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
3746 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
3747 0x20, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3748 0x00, 0x6d, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
3749 0x00, 0x05, 0x00, 0x6d, 0x00),
3750 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
3751 },
3752 { .freq = 5755,
3753 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
3754 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
3755 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3756 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
3757 0x00, 0x05, 0x00, 0x6c, 0x00),
3758 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
3759 },
3760 { .freq = 5760,
3761 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
3762 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
3763 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3764 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
3765 0x00, 0x05, 0x00, 0x6c, 0x00),
3766 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
3767 },
3768 { .freq = 5765,
3769 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
3770 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
3771 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3772 0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
3773 0x00, 0x05, 0x00, 0x6c, 0x00),
3774 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
3775 },
3776 { .freq = 5770,
3777 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
3778 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
3779 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3780 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
3781 0x00, 0x05, 0x00, 0x6b, 0x00),
3782 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
3783 },
3784 { .freq = 5775,
3785 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
3786 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
3787 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3788 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
3789 0x00, 0x05, 0x00, 0x6b, 0x00),
3790 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
3791 },
3792 { .freq = 5780,
3793 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
3794 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
3795 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3796 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
3797 0x00, 0x05, 0x00, 0x6b, 0x00),
3798 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
3799 },
3800 { .freq = 5785,
3801 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
3802 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
3803 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3804 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
3805 0x00, 0x05, 0x00, 0x6b, 0x00),
3806 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
3807 },
3808 { .freq = 5790,
3809 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
3810 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
3811 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3812 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
3813 0x00, 0x05, 0x00, 0x6b, 0x00),
3814 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
3815 },
3816 { .freq = 5795,
3817 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
3818 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
3819 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3820 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3821 0x00, 0x05, 0x00, 0x6b, 0x00),
3822 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
3823 },
3824 { .freq = 5800,
3825 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
3826 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
3827 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3828 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3829 0x00, 0x05, 0x00, 0x6b, 0x00),
3830 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
3831 },
3832 { .freq = 5805,
3833 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
3834 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
3835 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3836 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3837 0x00, 0x05, 0x00, 0x6a, 0x00),
3838 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
3839 },
3840 { .freq = 5810,
3841 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
3842 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
3843 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3844 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3845 0x00, 0x05, 0x00, 0x6a, 0x00),
3846 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
3847 },
3848 { .freq = 5815,
3849 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
3850 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
3851 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3852 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3853 0x00, 0x05, 0x00, 0x6a, 0x00),
3854 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
3855 },
3856 { .freq = 5820,
3857 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
3858 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
3859 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3860 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3861 0x00, 0x05, 0x00, 0x6a, 0x00),
3862 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
3863 },
3864 { .freq = 5825,
3865 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
3866 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
3867 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3868 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3869 0x00, 0x05, 0x00, 0x69, 0x00),
3870 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
3871 },
3872 { .freq = 5830,
3873 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
3874 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
3875 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
3876 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3877 0x00, 0x05, 0x00, 0x69, 0x00),
3878 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
3879 },
3880 { .freq = 5840,
3881 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
3882 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
3883 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
3884 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3885 0x00, 0x04, 0x00, 0x69, 0x00),
3886 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
3887 },
3888 { .freq = 5850,
3889 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
3890 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
3891 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
3892 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3893 0x00, 0x04, 0x00, 0x69, 0x00),
3894 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
3895 },
3896 { .freq = 5860,
3897 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
3898 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
3899 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
3900 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3901 0x00, 0x04, 0x00, 0x69, 0x00),
3902 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
3903 },
3904 { .freq = 5870,
3905 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
3906 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
3907 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
3908 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3909 0x00, 0x04, 0x00, 0x68, 0x00),
3910 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
3911 },
3912 { .freq = 5880,
3913 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
3914 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
3915 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
3916 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3917 0x00, 0x04, 0x00, 0x68, 0x00),
3918 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
3919 },
3920 { .freq = 5890,
3921 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
3922 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
3923 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
3924 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3925 0x00, 0x04, 0x00, 0x68, 0x00),
3926 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
3927 },
3928 { .freq = 5900,
3929 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
3930 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
3931 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
3932 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3933 0x00, 0x04, 0x00, 0x68, 0x00),
3934 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
3935 },
3936 { .freq = 5910,
3937 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
3938 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
3939 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
3940 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
3941 0x00, 0x04, 0x00, 0x68, 0x00),
3942 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
3943 },
3944 { .freq = 2412,
3945 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
3946 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
3947 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
3948 0x0b, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
3949 0x70, 0x00, 0x0b, 0x00, 0x0a),
3950 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
3951 },
3952 { .freq = 2417,
3953 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
3954 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3955 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
3956 0x0b, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
3957 0x70, 0x00, 0x0b, 0x00, 0x0a),
3958 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
3959 },
3960 { .freq = 2422,
3961 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
3962 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3963 0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
3964 0x0b, 0x00, 0x0a, 0x00, 0x67, 0x00, 0x03, 0x00,
3965 0x70, 0x00, 0x0b, 0x00, 0x0a),
3966 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
3967 },
3968 { .freq = 2427,
3969 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
3970 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3971 0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
3972 0x0a, 0x00, 0x0a, 0x00, 0x57, 0x00, 0x03, 0x00,
3973 0x70, 0x00, 0x0a, 0x00, 0x0a),
3974 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
3975 },
3976 { .freq = 2432,
3977 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
3978 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3979 0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
3980 0x0a, 0x00, 0x0a, 0x00, 0x56, 0x00, 0x03, 0x00,
3981 0x70, 0x00, 0x0a, 0x00, 0x0a),
3982 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
3983 },
3984 { .freq = 2437,
3985 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
3986 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3987 0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
3988 0x0a, 0x00, 0x0a, 0x00, 0x46, 0x00, 0x03, 0x00,
3989 0x70, 0x00, 0x0a, 0x00, 0x0a),
3990 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
3991 },
3992 { .freq = 2442,
3993 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
3994 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3995 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
3996 0x0a, 0x00, 0x0a, 0x00, 0x45, 0x00, 0x02, 0x00,
3997 0x70, 0x00, 0x0a, 0x00, 0x0a),
3998 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
3999 },
4000 { .freq = 2447,
4001 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
4002 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
4003 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
4004 0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
4005 0x70, 0x00, 0x0a, 0x00, 0x09),
4006 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
4007 },
4008 { .freq = 2452,
4009 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
4010 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
4011 0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
4012 0x0a, 0x00, 0x09, 0x00, 0x23, 0x00, 0x02, 0x00,
4013 0x70, 0x00, 0x0a, 0x00, 0x09),
4014 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
4015 },
4016 { .freq = 2457,
4017 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
4018 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
4019 0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
4020 0x0a, 0x00, 0x09, 0x00, 0x12, 0x00, 0x02, 0x00,
4021 0x70, 0x00, 0x0a, 0x00, 0x09),
4022 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
4023 },
4024 { .freq = 2462,
4025 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
4026 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
4027 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
4028 0x09, 0x00, 0x09, 0x00, 0x02, 0x00, 0x02, 0x00,
4029 0x70, 0x00, 0x09, 0x00, 0x09),
4030 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
4031 },
4032 { .freq = 2467,
4033 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
4034 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
4035 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
4036 0x09, 0x00, 0x09, 0x00, 0x01, 0x00, 0x02, 0x00,
4037 0x70, 0x00, 0x09, 0x00, 0x09),
4038 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
4039 },
4040 { .freq = 2472,
4041 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
4042 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
4043 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
4044 0x09, 0x00, 0x09, 0x00, 0x01, 0x00, 0x02, 0x00,
4045 0x70, 0x00, 0x09, 0x00, 0x09),
4046 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
4047 },
4048 { .freq = 2484,
4049 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
4050 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
4051 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
4052 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
4053 0x70, 0x00, 0x09, 0x00, 0x09),
4054 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
4055 },
4056};
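
/*
 * Illustrative sketch only (not part of this file): one way a channel table
 * like the ones above could be searched by center frequency.  The struct
 * layout and names below are stand-ins for illustration, not the driver's
 * real b43_nphy_channeltab_entry_rev3 definition or lookup helper.
 */
struct example_chantab_entry {
	unsigned short freq;	/* channel center frequency in MHz */
	/* radio (RADIOREGS3) and PHY (PHYREGS) values omitted in this sketch */
};

static const struct example_chantab_entry *
example_find_chantab_entry(const struct example_chantab_entry *tab,
			   unsigned int len, unsigned short freq)
{
	unsigned int i;

	/* Linear scan; the tables are small and consulted only when tuning. */
	for (i = 0; i < len; i++) {
		if (tab[i].freq == freq)
			return &tab[i];
	}
	return NULL;	/* frequency not present in this table */
}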
4057
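/* Same entry layout as above; the _rev7_9 name suggests this variant is
 * used for N-PHY/radio revisions 7 and 9, carrying slightly different
 * per-channel radio register values.
 */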
4058static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[] = {
4059 { .freq = 4920,
4060 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
4061 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
4062 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0f,
4063 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
4064 0x00, 0x0f, 0x00, 0x6f, 0x00),
4065 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
4066 },
4067 { .freq = 4930,
4068 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
4069 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
4070 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
4071 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
4072 0x00, 0x0e, 0x00, 0x6f, 0x00),
4073 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
4074 },
4075 { .freq = 4940,
4076 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
4077 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
4078 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
4079 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
4080 0x00, 0x0e, 0x00, 0x6f, 0x00),
4081 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
4082 },
4083 { .freq = 4950,
4084 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
4085 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
4086 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
4087 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
4088 0x00, 0x0e, 0x00, 0x6f, 0x00),
4089 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
4090 },
4091 { .freq = 4960,
4092 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
4093 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4094 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0e,
4095 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
4096 0x00, 0x0e, 0x00, 0x6f, 0x00),
4097 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
4098 },
4099 { .freq = 4970,
4100 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
4101 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4102 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
4103 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
4104 0x00, 0x0d, 0x00, 0x6f, 0x00),
4105 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
4106 },
4107 { .freq = 4980,
4108 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
4109 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4110 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
4111 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
4112 0x00, 0x0d, 0x00, 0x6f, 0x00),
4113 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
4114 },
4115 { .freq = 4990,
4116 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
4117 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4118 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
4119 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
4120 0x00, 0x0d, 0x00, 0x6f, 0x00),
4121 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
4122 },
4123 { .freq = 5000,
4124 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
4125 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4126 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
4127 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
4128 0x00, 0x0d, 0x00, 0x6f, 0x00),
4129 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
4130 },
4131 { .freq = 5010,
4132 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
4133 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4134 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
4135 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
4136 0x00, 0x0d, 0x00, 0x6f, 0x00),
4137 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
4138 },
4139 { .freq = 5020,
4140 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
4141 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4142 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0d,
4143 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
4144 0x00, 0x0d, 0x00, 0x6f, 0x00),
4145 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
4146 },
4147 { .freq = 5030,
4148 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
4149 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4150 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
4151 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
4152 0x00, 0x0c, 0x00, 0x6f, 0x00),
4153 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
4154 },
4155 { .freq = 5040,
4156 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
4157 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4158 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
4159 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
4160 0x00, 0x0c, 0x00, 0x6f, 0x00),
4161 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
4162 },
4163 { .freq = 5050,
4164 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
4165 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4166 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
4167 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
4168 0x00, 0x0c, 0x00, 0x6f, 0x00),
4169 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
4170 },
4171 { .freq = 5060,
4172 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
4173 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4174 0xff, 0xfd, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
4175 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x70,
4176 0x00, 0x0c, 0x00, 0x6f, 0x00),
4177 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
4178 },
4179 { .freq = 5070,
4180 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
4181 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4182 0xff, 0xfd, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
4183 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x70,
4184 0x00, 0x0b, 0x00, 0x6f, 0x00),
4185 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
4186 },
4187 { .freq = 5080,
4188 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
4189 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4190 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
4191 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
4192 0x00, 0x0b, 0x00, 0x6f, 0x00),
4193 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
4194 },
4195 { .freq = 5090,
4196 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
4197 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4198 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
4199 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
4200 0x00, 0x0b, 0x00, 0x6f, 0x00),
4201 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
4202 },
4203 { .freq = 5100,
4204 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
4205 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4206 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
4207 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
4208 0x00, 0x0b, 0x00, 0x6f, 0x00),
4209 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
4210 },
4211 { .freq = 5110,
4212 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
4213 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4214 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
4215 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
4216 0x00, 0x0b, 0x00, 0x6f, 0x00),
4217 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
4218 },
4219 { .freq = 5120,
4220 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
4221 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4222 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
4223 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
4224 0x00, 0x0b, 0x00, 0x6f, 0x00),
4225 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
4226 },
4227 { .freq = 5130,
4228 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
4229 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4230 0xff, 0xfb, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0a,
4231 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x70,
4232 0x00, 0x0a, 0x00, 0x6f, 0x00),
4233 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
4234 },
4235 { .freq = 5140,
4236 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
4237 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4238 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x0a,
4239 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
4240 0x00, 0x0a, 0x00, 0x6f, 0x00),
4241 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
4242 },
4243 { .freq = 5160,
4244 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
4245 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4246 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x09,
4247 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
4248 0x00, 0x09, 0x00, 0x6e, 0x00),
4249 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
4250 },
4251 { .freq = 5170,
4252 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
4253 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4254 0xff, 0xfb, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
4255 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x06, 0x00, 0x70,
4256 0x00, 0x09, 0x00, 0x6e, 0x00),
4257 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
4258 },
4259 { .freq = 5180,
4260 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
4261 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4262 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
4263 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
4264 0x00, 0x09, 0x00, 0x6e, 0x00),
4265 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
4266 },
4267 { .freq = 5190,
4268 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
4269 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4270 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
4271 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
4272 0x00, 0x09, 0x00, 0x6e, 0x00),
4273 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
4274 },
4275 { .freq = 5200,
4276 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
4277 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4278 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
4279 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
4280 0x00, 0x09, 0x00, 0x6e, 0x00),
4281 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
4282 },
4283 { .freq = 5210,
4284 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
4285 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4286 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
4287 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
4288 0x00, 0x09, 0x00, 0x6e, 0x00),
4289 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
4290 },
4291 { .freq = 5220,
4292 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
4293 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
4294 0xfe, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
4295 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
4296 0x00, 0x09, 0x00, 0x6e, 0x00),
4297 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
4298 },
4299 { .freq = 5230,
4300 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
4301 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
4302 0xee, 0xea, 0x00, 0x06, 0x00, 0x70, 0x00, 0x08,
4303 0x00, 0x9e, 0x00, 0xea, 0x00, 0x06, 0x00, 0x70,
4304 0x00, 0x08, 0x00, 0x6e, 0x00),
4305 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
4306 },
4307 { .freq = 5240,
4308 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
4309 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
4310 0xee, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
4311 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
4312 0x00, 0x08, 0x00, 0x6d, 0x00),
4313 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
4314 },
4315 { .freq = 5250,
4316 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
4317 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
4318 0xed, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
4319 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
4320 0x00, 0x08, 0x00, 0x6d, 0x00),
4321 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
4322 },
4323 { .freq = 5260,
4324 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
4325 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
4326 0xed, 0xd9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
4327 0x00, 0x9d, 0x00, 0xd9, 0x00, 0x05, 0x00, 0x70,
4328 0x00, 0x08, 0x00, 0x6d, 0x00),
4329 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
4330 },
4331 { .freq = 5270,
4332 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
4333 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
4334 0xed, 0xd8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
4335 0x00, 0x9c, 0x00, 0xd8, 0x00, 0x04, 0x00, 0x70,
4336 0x00, 0x07, 0x00, 0x6c, 0x00),
4337 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
4338 },
4339 { .freq = 5280,
4340 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
4341 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
4342 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
4343 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
4344 0x00, 0x07, 0x00, 0x6c, 0x00),
4345 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
4346 },
4347 { .freq = 5290,
4348 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
4349 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
4350 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
4351 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
4352 0x00, 0x07, 0x00, 0x6c, 0x00),
4353 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
4354 },
4355 { .freq = 5300,
4356 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
4357 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
4358 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
4359 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
4360 0x00, 0x07, 0x00, 0x6c, 0x00),
4361 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
4362 },
4363 { .freq = 5310,
4364 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
4365 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
4366 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
4367 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
4368 0x00, 0x07, 0x00, 0x6c, 0x00),
4369 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
4370 },
4371 { .freq = 5320,
4372 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
4373 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
4374 0xdb, 0xb8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
4375 0x00, 0x9c, 0x00, 0xb8, 0x00, 0x04, 0x00, 0x70,
4376 0x00, 0x07, 0x00, 0x6c, 0x00),
4377 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
4378 },
4379 { .freq = 5330,
4380 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
4381 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
4382 0xcb, 0xb7, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
4383 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x04, 0x00, 0x70,
4384 0x00, 0x07, 0x00, 0x6b, 0x00),
4385 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
4386 },
4387 { .freq = 5340,
4388 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
4389 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
4390 0xca, 0xb7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x07,
4391 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x70,
4392 0x00, 0x07, 0x00, 0x6b, 0x00),
4393 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
4394 },
4395 { .freq = 5350,
4396 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
4397 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
4398 0xca, 0xa7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
4399 0x00, 0x9b, 0x00, 0xa7, 0x00, 0x03, 0x00, 0x70,
4400 0x00, 0x06, 0x00, 0x6b, 0x00),
4401 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
4402 },
4403 { .freq = 5360,
4404 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
4405 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
4406 0xc9, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
4407 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
4408 0x00, 0x06, 0x00, 0x6b, 0x00),
4409 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
4410 },
4411 { .freq = 5370,
4412 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
4413 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
4414 0xc9, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
4415 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
4416 0x00, 0x06, 0x00, 0x7b, 0x00),
4417 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
4418 },
4419 { .freq = 5380,
4420 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
4421 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
4422 0xb8, 0x96, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
4423 0x00, 0x9a, 0x00, 0x96, 0x00, 0x03, 0x00, 0x70,
4424 0x00, 0x06, 0x00, 0x7a, 0x00),
4425 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
4426 },
4427 { .freq = 5390,
4428 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
4429 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
4430 0xb8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
4431 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
4432 0x00, 0x06, 0x00, 0x7a, 0x00),
4433 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
4434 },
4435 { .freq = 5400,
4436 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
4437 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
4438 0xb8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
4439 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
4440 0x00, 0x06, 0x00, 0x7a, 0x00),
4441 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
4442 },
4443 { .freq = 5410,
4444 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
4445 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
4446 0xb7, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
4447 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
4448 0x00, 0x05, 0x00, 0x7a, 0x00),
4449 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
4450 },
4451 { .freq = 5420,
4452 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
4453 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
4454 0xa7, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
4455 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
4456 0x00, 0x05, 0x00, 0x7a, 0x00),
4457 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
4458 },
4459 { .freq = 5430,
4460 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
4461 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
4462 0xa6, 0x85, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
4463 0x00, 0x99, 0x00, 0x85, 0x00, 0x02, 0x00, 0x70,
4464 0x00, 0x05, 0x00, 0x79, 0x00),
4465 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
4466 },
4467 { .freq = 5440,
4468 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
4469 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
4470 0xa6, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
4471 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
4472 0x00, 0x05, 0x00, 0x79, 0x00),
4473 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
4474 },
4475 { .freq = 5450,
4476 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
4477 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
4478 0x95, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
4479 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
4480 0x00, 0x05, 0x00, 0x79, 0x00),
4481 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
4482 },
4483 { .freq = 5460,
4484 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
4485 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
4486 0x95, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x04,
4487 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
4488 0x00, 0x04, 0x00, 0x79, 0x00),
4489 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
4490 },
4491 { .freq = 5470,
4492 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
4493 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
4494 0x94, 0x74, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
4495 0x00, 0x99, 0x00, 0x74, 0x00, 0x01, 0x00, 0x70,
4496 0x00, 0x04, 0x00, 0x79, 0x00),
4497 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
4498 },
4499 { .freq = 5480,
4500 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
4501 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
4502 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
4503 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
4504 0x00, 0x04, 0x00, 0x78, 0x00),
4505 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
4506 },
4507 { .freq = 5490,
4508 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
4509 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
4510 0x83, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
4511 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
4512 0x00, 0x04, 0x00, 0x78, 0x00),
4513 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
4514 },
4515 { .freq = 5500,
4516 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
4517 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
4518 0x82, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
4519 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
4520 0x00, 0x04, 0x00, 0x78, 0x00),
4521 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
4522 },
4523 { .freq = 5510,
4524 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
4525 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
4526 0x82, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
4527 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
4528 0x00, 0x04, 0x00, 0x78, 0x00),
4529 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
4530 },
4531 { .freq = 5520,
4532 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
4533 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
4534 0x72, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
4535 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
4536 0x00, 0x04, 0x00, 0x78, 0x00),
4537 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
4538 },
4539 { .freq = 5530,
4540 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
4541 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
4542 0x72, 0x63, 0x00, 0x01, 0x00, 0x70, 0x00, 0x03,
4543 0x00, 0x98, 0x00, 0x63, 0x00, 0x01, 0x00, 0x70,
4544 0x00, 0x03, 0x00, 0x78, 0x00),
4545 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
4546 },
4547 { .freq = 5540,
4548 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
4549 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
4550 0x71, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
4551 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
4552 0x00, 0x03, 0x00, 0x77, 0x00),
4553 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
4554 },
4555 { .freq = 5550,
4556 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
4557 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
4558 0x61, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
4559 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
4560 0x00, 0x03, 0x00, 0x77, 0x00),
4561 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
4562 },
4563 { .freq = 5560,
4564 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
4565 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
4566 0x61, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
4567 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
4568 0x00, 0x03, 0x00, 0x77, 0x00),
4569 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
4570 },
4571 { .freq = 5570,
4572 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
4573 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
4574 0x61, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
4575 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
4576 0x00, 0x02, 0x00, 0x76, 0x00),
4577 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
4578 },
4579 { .freq = 5580,
4580 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
4581 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
4582 0x60, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
4583 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
4584 0x00, 0x02, 0x00, 0x86, 0x00),
4585 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
4586 },
4587 { .freq = 5590,
4588 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
4589 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
4590 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
4591 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
4592 0x00, 0x02, 0x00, 0x86, 0x00),
4593 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
4594 },
4595 { .freq = 5600,
4596 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
4597 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
4598 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
4599 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
4600 0x00, 0x02, 0x00, 0x86, 0x00),
4601 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
4602 },
4603 { .freq = 5610,
4604 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
4605 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
4606 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
4607 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
4608 0x00, 0x02, 0x00, 0x86, 0x00),
4609 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
4610 },
4611 { .freq = 5620,
4612 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
4613 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
4614 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
4615 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
4616 0x00, 0x02, 0x00, 0x86, 0x00),
4617 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
4618 },
4619 { .freq = 5630,
4620 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
4621 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
4622 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
4623 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
4624 0x00, 0x02, 0x00, 0x86, 0x00),
4625 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
4626 },
4627 { .freq = 5640,
4628 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
4629 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
4630 0x40, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
4631 0x00, 0x95, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
4632 0x00, 0x02, 0x00, 0x85, 0x00),
4633 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
4634 },
4635 { .freq = 5650,
4636 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
4637 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
4638 0x40, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
4639 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
4640 0x00, 0x01, 0x00, 0x85, 0x00),
4641 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
4642 },
4643 { .freq = 5660,
4644 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
4645 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
4646 0x40, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
4647 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
4648 0x00, 0x01, 0x00, 0x85, 0x00),
4649 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
4650 },
4651 { .freq = 5670,
4652 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
4653 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
4654 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
4655 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
4656 0x00, 0x01, 0x00, 0x84, 0x00),
4657 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
4658 },
4659 { .freq = 5680,
4660 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
4661 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
4662 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
4663 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
4664 0x00, 0x01, 0x00, 0x84, 0x00),
4665 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
4666 },
4667 { .freq = 5690,
4668 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
4669 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
4670 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
4671 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
4672 0x00, 0x01, 0x00, 0x94, 0x00),
4673 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
4674 },
4675 { .freq = 5700,
4676 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
4677 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
4678 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
4679 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
4680 0x00, 0x01, 0x00, 0x94, 0x00),
4681 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
4682 },
4683 { .freq = 5710,
4684 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
4685 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
4686 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
4687 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
4688 0x00, 0x01, 0x00, 0x94, 0x00),
4689 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
4690 },
4691 { .freq = 5720,
4692 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
4693 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
4694 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
4695 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
4696 0x00, 0x01, 0x00, 0x94, 0x00),
4697 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
4698 },
4699 { .freq = 5725,
4700 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
4701 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
4702 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
4703 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
4704 0x00, 0x01, 0x00, 0x94, 0x00),
4705 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
4706 },
4707 { .freq = 5730,
4708 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
4709 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
4710 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
4711 0x00, 0x94, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
4712 0x00, 0x01, 0x00, 0x94, 0x00),
4713 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
4714 },
4715 { .freq = 5735,
4716 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
4717 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
4718 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4719 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
4720 0x00, 0x00, 0x00, 0x93, 0x00),
4721 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
4722 },
4723 { .freq = 5740,
4724 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
4725 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
4726 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4727 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
4728 0x00, 0x00, 0x00, 0x93, 0x00),
4729 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
4730 },
4731 { .freq = 5745,
4732 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
4733 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
4734 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4735 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
4736 0x00, 0x00, 0x00, 0x93, 0x00),
4737 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
4738 },
4739 { .freq = 5750,
4740 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
4741 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
4742 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4743 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
4744 0x00, 0x00, 0x00, 0x93, 0x00),
4745 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
4746 },
4747 { .freq = 5755,
4748 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
4749 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
4750 0x10, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4751 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
4752 0x00, 0x00, 0x00, 0x93, 0x00),
4753 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
4754 },
4755 { .freq = 5760,
4756 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
4757 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
4758 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4759 0x00, 0x93, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
4760 0x00, 0x00, 0x00, 0x93, 0x00),
4761 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
4762 },
4763 { .freq = 5765,
4764 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
4765 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
4766 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4767 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
4768 0x00, 0x00, 0x00, 0x92, 0x00),
4769 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
4770 },
4771 { .freq = 5770,
4772 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
4773 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
4774 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4775 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
4776 0x00, 0x00, 0x00, 0x92, 0x00),
4777 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
4778 },
4779 { .freq = 5775,
4780 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
4781 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
4782 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4783 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
4784 0x00, 0x00, 0x00, 0x92, 0x00),
4785 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
4786 },
4787 { .freq = 5780,
4788 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
4789 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
4790 0x10, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4791 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4792 0x00, 0x00, 0x00, 0x92, 0x00),
4793 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
4794 },
4795 { .freq = 5785,
4796 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
4797 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
4798 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4799 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4800 0x00, 0x00, 0x00, 0x92, 0x00),
4801 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
4802 },
4803 { .freq = 5790,
4804 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
4805 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
4806 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4807 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4808 0x00, 0x00, 0x00, 0x92, 0x00),
4809 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
4810 },
4811 { .freq = 5795,
4812 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
4813 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
4814 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4815 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4816 0x00, 0x00, 0x00, 0x92, 0x00),
4817 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
4818 },
4819 { .freq = 5800,
4820 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
4821 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
4822 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4823 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4824 0x00, 0x00, 0x00, 0x92, 0x00),
4825 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
4826 },
4827 { .freq = 5805,
4828 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
4829 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
4830 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4831 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4832 0x00, 0x00, 0x00, 0x92, 0x00),
4833 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
4834 },
4835 { .freq = 5810,
4836 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
4837 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
4838 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4839 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4840 0x00, 0x00, 0x00, 0x92, 0x00),
4841 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
4842 },
4843 { .freq = 5815,
4844 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
4845 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
4846 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4847 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4848 0x00, 0x00, 0x00, 0x92, 0x00),
4849 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
4850 },
4851 { .freq = 5820,
4852 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
4853 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
4854 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4855 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4856 0x00, 0x00, 0x00, 0x92, 0x00),
4857 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
4858 },
4859 { .freq = 5825,
4860 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
4861 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
4862 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4863 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4864 0x00, 0x00, 0x00, 0x92, 0x00),
4865 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
4866 },
4867 { .freq = 5830,
4868 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
4869 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
4870 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4871 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4872 0x00, 0x00, 0x00, 0x92, 0x00),
4873 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
4874 },
4875 { .freq = 5840,
4876 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
4877 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
4878 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4879 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
4880 0x00, 0x00, 0x00, 0x92, 0x00),
4881 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
4882 },
4883 { .freq = 5850,
4884 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
4885 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
4886 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4887 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
4888 0x00, 0x00, 0x00, 0x92, 0x00),
4889 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
4890 },
4891 { .freq = 5860,
4892 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
4893 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
4894 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4895 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
4896 0x00, 0x00, 0x00, 0x92, 0x00),
4897 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
4898 },
4899 { .freq = 5870,
4900 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
4901 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
4902 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4903 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
4904 0x00, 0x00, 0x00, 0x91, 0x00),
4905 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
4906 },
4907 { .freq = 5880,
4908 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
4909 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
4910 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4911 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
4912 0x00, 0x00, 0x00, 0x91, 0x00),
4913 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
4914 },
4915 { .freq = 5890,
4916 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
4917 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
4918 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4919 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
4920 0x00, 0x00, 0x00, 0x91, 0x00),
4921 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
4922 },
4923 { .freq = 5900,
4924 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
4925 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
4926 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4927 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
4928 0x00, 0x00, 0x00, 0x91, 0x00),
4929 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
4930 },
4931 { .freq = 5910,
4932 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
4933 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
4934 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
4935 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
4936 0x00, 0x00, 0x00, 0x91, 0x00),
4937 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
4938 },
4939 { .freq = 2412,
4940 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
4941 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
4942 0x00, 0x00, 0x89, 0x00, 0x03, 0x00, 0x70, 0x00,
4943 0x0f, 0x00, 0x0b, 0x00, 0x89, 0x00, 0x03, 0x00,
4944 0x70, 0x00, 0x0f, 0x00, 0x0b),
4945 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
4946 },
4947 { .freq = 2417,
4948 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
4949 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4950 0x00, 0x00, 0x89, 0x00, 0x03, 0x00, 0x70, 0x00,
4951 0x0f, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
4952 0x70, 0x00, 0x0f, 0x00, 0x0a),
4953 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
4954 },
4955 { .freq = 2422,
4956 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
4957 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4958 0x00, 0x00, 0x89, 0x00, 0x03, 0x00, 0x70, 0x00,
4959 0x0f, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
4960 0x70, 0x00, 0x0f, 0x00, 0x0a),
4961 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
4962 },
4963 { .freq = 2427,
4964 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
4965 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4966 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
4967 0x0e, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
4968 0x70, 0x00, 0x0e, 0x00, 0x0a),
4969 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
4970 },
4971 { .freq = 2432,
4972 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
4973 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4974 0x00, 0x00, 0x77, 0x00, 0x03, 0x00, 0x70, 0x00,
4975 0x0e, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
4976 0x70, 0x00, 0x0e, 0x00, 0x0a),
4977 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
4978 },
4979 { .freq = 2437,
4980 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
4981 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4982 0x00, 0x00, 0x76, 0x00, 0x03, 0x00, 0x70, 0x00,
4983 0x0e, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
4984 0x70, 0x00, 0x0e, 0x00, 0x0a),
4985 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
4986 },
4987 { .freq = 2442,
4988 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
4989 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4990 0x00, 0x00, 0x66, 0x00, 0x03, 0x00, 0x70, 0x00,
4991 0x0e, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x03, 0x00,
4992 0x70, 0x00, 0x0e, 0x00, 0x0a),
4993 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
4994 },
4995 { .freq = 2447,
4996 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
4997 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
4998 0x00, 0x00, 0x55, 0x00, 0x02, 0x00, 0x70, 0x00,
4999 0x0e, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
5000 0x70, 0x00, 0x0e, 0x00, 0x09),
5001 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
5002 },
5003 { .freq = 2452,
5004 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
5005 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
5006 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
5007 0x0e, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
5008 0x70, 0x00, 0x0e, 0x00, 0x09),
5009 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
5010 },
5011 { .freq = 2457,
5012 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
5013 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
5014 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
5015 0x0d, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
5016 0x70, 0x00, 0x0d, 0x00, 0x09),
5017 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
5018 },
5019 { .freq = 2462,
5020 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
5021 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
5022 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, 0x70, 0x00,
5023 0x0d, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
5024 0x70, 0x00, 0x0d, 0x00, 0x09),
5025 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
5026 },
5027 { .freq = 2467,
5028 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
5029 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
5030 0x00, 0x00, 0x22, 0x00, 0x02, 0x00, 0x70, 0x00,
5031 0x0d, 0x00, 0x08, 0x00, 0x22, 0x00, 0x02, 0x00,
5032 0x70, 0x00, 0x0d, 0x00, 0x08),
5033 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
5034 },
5035 { .freq = 2472,
5036 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
5037 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
5038 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x70, 0x00,
5039 0x0d, 0x00, 0x08, 0x00, 0x11, 0x00, 0x02, 0x00,
5040 0x70, 0x00, 0x0d, 0x00, 0x08),
5041 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
5042 },
5043 { .freq = 2484,
5044 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
5045 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
5046 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
5047 0x0d, 0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00,
5048 0x70, 0x00, 0x0d, 0x00, 0x08),
5049 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
5050 },
5051};
5052
5053static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] = {
5054 { .freq = 4920,
5055 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
5056 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
5057 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5058 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5059 0x00, 0x0f, 0x00, 0x6f, 0x00),
5060 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
5061 },
5062 { .freq = 4930,
5063 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
5064 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
5065 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5066 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5067 0x00, 0x0f, 0x00, 0x6f, 0x00),
5068 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
5069 },
5070 { .freq = 4940,
5071 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
5072 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
5073 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5074 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5075 0x00, 0x0f, 0x00, 0x6f, 0x00),
5076 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
5077 },
5078 { .freq = 4950,
5079 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
5080 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
5081 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5082 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5083 0x00, 0x0f, 0x00, 0x6f, 0x00),
5084 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
5085 },
5086 { .freq = 4960,
5087 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
5088 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5089 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5090 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5091 0x00, 0x0f, 0x00, 0x6f, 0x00),
5092 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
5093 },
5094 { .freq = 4970,
5095 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
5096 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5097 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5098 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5099 0x00, 0x0f, 0x00, 0x6f, 0x00),
5100 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
5101 },
5102 { .freq = 4980,
5103 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
5104 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5105 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5106 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5107 0x00, 0x0f, 0x00, 0x6f, 0x00),
5108 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
5109 },
5110 { .freq = 4990,
5111 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
5112 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5113 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5114 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5115 0x00, 0x0f, 0x00, 0x6f, 0x00),
5116 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
5117 },
5118 { .freq = 5000,
5119 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
5120 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5121 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5122 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5123 0x00, 0x0f, 0x00, 0x6f, 0x00),
5124 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
5125 },
5126 { .freq = 5010,
5127 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
5128 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5129 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5130 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5131 0x00, 0x0f, 0x00, 0x6f, 0x00),
5132 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
5133 },
5134 { .freq = 5020,
5135 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
5136 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5137 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5138 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5139 0x00, 0x0f, 0x00, 0x6f, 0x00),
5140 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
5141 },
5142 { .freq = 5030,
5143 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
5144 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5145 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5146 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5147 0x00, 0x0f, 0x00, 0x6f, 0x00),
5148 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
5149 },
5150 { .freq = 5040,
5151 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
5152 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5153 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5154 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5155 0x00, 0x0f, 0x00, 0x6f, 0x00),
5156 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
5157 },
5158 { .freq = 5050,
5159 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
5160 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5161 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5162 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5163 0x00, 0x0f, 0x00, 0x6f, 0x00),
5164 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
5165 },
5166 { .freq = 5060,
5167 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
5168 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5169 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5170 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
5171 0x00, 0x0f, 0x00, 0x6f, 0x00),
5172 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
5173 },
5174 { .freq = 5070,
5175 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
5176 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5177 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5178 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
5179 0x00, 0x0f, 0x00, 0x6f, 0x00),
5180 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
5181 },
5182 { .freq = 5080,
5183 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
5184 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5185 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5186 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
5187 0x00, 0x0f, 0x00, 0x6f, 0x00),
5188 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
5189 },
5190 { .freq = 5090,
5191 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
5192 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5193 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
5194 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
5195 0x00, 0x0f, 0x00, 0x6f, 0x00),
5196 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
5197 },
5198 { .freq = 5100,
5199 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
5200 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5201 0xff, 0xfd, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
5202 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x77,
5203 0x00, 0x0f, 0x00, 0x6f, 0x00),
5204 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
5205 },
5206 { .freq = 5110,
5207 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
5208 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5209 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
5210 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
5211 0x00, 0x0f, 0x00, 0x6f, 0x00),
5212 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
5213 },
5214 { .freq = 5120,
5215 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
5216 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5217 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
5218 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
5219 0x00, 0x0f, 0x00, 0x6f, 0x00),
5220 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
5221 },
5222 { .freq = 5130,
5223 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
5224 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5225 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
5226 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
5227 0x00, 0x0f, 0x00, 0x6f, 0x00),
5228 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
5229 },
5230 { .freq = 5140,
5231 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
5232 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5233 0xff, 0xfb, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
5234 0x00, 0x6f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x77,
5235 0x00, 0x0f, 0x00, 0x6f, 0x00),
5236 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
5237 },
5238 { .freq = 5160,
5239 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
5240 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5241 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
5242 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
5243 0x00, 0x0e, 0x00, 0x6f, 0x00),
5244 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
5245 },
5246 { .freq = 5170,
5247 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
5248 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5249 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
5250 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
5251 0x00, 0x0e, 0x00, 0x6f, 0x00),
5252 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
5253 },
5254 { .freq = 5180,
5255 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
5256 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5257 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
5258 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
5259 0x00, 0x0e, 0x00, 0x6f, 0x00),
5260 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
5261 },
5262 { .freq = 5190,
5263 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
5264 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5265 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0d,
5266 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
5267 0x00, 0x0d, 0x00, 0x6f, 0x00),
5268 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
5269 },
5270 { .freq = 5200,
5271 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
5272 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5273 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
5274 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
5275 0x00, 0x0d, 0x00, 0x6f, 0x00),
5276 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
5277 },
5278 { .freq = 5210,
5279 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
5280 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5281 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
5282 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
5283 0x00, 0x0d, 0x00, 0x6f, 0x00),
5284 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
5285 },
5286 { .freq = 5220,
5287 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
5288 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
5289 0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
5290 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
5291 0x00, 0x0d, 0x00, 0x6f, 0x00),
5292 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
5293 },
5294 { .freq = 5230,
5295 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
5296 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
5297 0xee, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
5298 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
5299 0x00, 0x0d, 0x00, 0x6f, 0x00),
5300 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
5301 },
5302 { .freq = 5240,
5303 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
5304 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
5305 0xee, 0xc8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
5306 0x00, 0x6f, 0x00, 0xc8, 0x00, 0x05, 0x00, 0x77,
5307 0x00, 0x0d, 0x00, 0x6f, 0x00),
5308 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
5309 },
5310 { .freq = 5250,
5311 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
5312 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
5313 0xed, 0xc7, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
5314 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x77,
5315 0x00, 0x0d, 0x00, 0x6f, 0x00),
5316 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
5317 },
5318 { .freq = 5260,
5319 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
5320 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
5321 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0d,
5322 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
5323 0x00, 0x0d, 0x00, 0x6f, 0x00),
5324 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
5325 },
5326 { .freq = 5270,
5327 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
5328 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
5329 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0c,
5330 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
5331 0x00, 0x0c, 0x00, 0x6f, 0x00),
5332 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
5333 },
5334 { .freq = 5280,
5335 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
5336 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
5337 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
5338 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
5339 0x00, 0x0c, 0x00, 0x6f, 0x00),
5340 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
5341 },
5342 { .freq = 5290,
5343 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
5344 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
5345 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
5346 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
5347 0x00, 0x0c, 0x00, 0x6f, 0x00),
5348 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
5349 },
5350 { .freq = 5300,
5351 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
5352 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
5353 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
5354 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
5355 0x00, 0x0c, 0x00, 0x6f, 0x00),
5356 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
5357 },
5358 { .freq = 5310,
5359 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
5360 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
5361 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
5362 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
5363 0x00, 0x0c, 0x00, 0x6f, 0x00),
5364 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
5365 },
5366 { .freq = 5320,
5367 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
5368 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
5369 0xdb, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
5370 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
5371 0x00, 0x0c, 0x00, 0x6f, 0x00),
5372 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
5373 },
5374 { .freq = 5330,
5375 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
5376 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
5377 0xcb, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
5378 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
5379 0x00, 0x0b, 0x00, 0x6f, 0x00),
5380 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
5381 },
5382 { .freq = 5340,
5383 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
5384 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
5385 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
5386 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
5387 0x00, 0x0b, 0x00, 0x6f, 0x00),
5388 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
5389 },
5390 { .freq = 5350,
5391 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
5392 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
5393 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
5394 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
5395 0x00, 0x0b, 0x00, 0x6f, 0x00),
5396 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
5397 },
5398 { .freq = 5360,
5399 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
5400 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
5401 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
5402 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
5403 0x00, 0x0a, 0x00, 0x6f, 0x00),
5404 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
5405 },
5406 { .freq = 5370,
5407 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
5408 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
5409 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
5410 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
5411 0x00, 0x0a, 0x00, 0x6f, 0x00),
5412 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
5413 },
5414 { .freq = 5380,
5415 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
5416 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
5417 0xb8, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
5418 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
5419 0x00, 0x0a, 0x00, 0x6f, 0x00),
5420 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
5421 },
5422 { .freq = 5390,
5423 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
5424 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
5425 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
5426 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
5427 0x00, 0x0a, 0x00, 0x6f, 0x00),
5428 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
5429 },
5430 { .freq = 5400,
5431 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
5432 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
5433 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
5434 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
5435 0x00, 0x0a, 0x00, 0x6f, 0x00),
5436 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
5437 },
5438 { .freq = 5410,
5439 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
5440 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
5441 0xb7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
5442 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
5443 0x00, 0x0a, 0x00, 0x6f, 0x00),
5444 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
5445 },
5446 { .freq = 5420,
5447 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
5448 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
5449 0xa7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
5450 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
5451 0x00, 0x0a, 0x00, 0x6f, 0x00),
5452 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
5453 },
5454 { .freq = 5430,
5455 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
5456 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
5457 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
5458 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
5459 0x00, 0x0a, 0x00, 0x6f, 0x00),
5460 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
5461 },
5462 { .freq = 5440,
5463 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
5464 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
5465 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x09,
5466 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
5467 0x00, 0x09, 0x00, 0x6f, 0x00),
5468 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
5469 },
5470 { .freq = 5450,
5471 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
5472 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
5473 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
5474 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
5475 0x00, 0x09, 0x00, 0x6f, 0x00),
5476 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
5477 },
5478 { .freq = 5460,
5479 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
5480 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
5481 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
5482 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
5483 0x00, 0x09, 0x00, 0x6f, 0x00),
5484 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
5485 },
5486 { .freq = 5470,
5487 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
5488 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
5489 0x94, 0x73, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
5490 0x00, 0x6f, 0x00, 0x73, 0x00, 0x01, 0x00, 0x77,
5491 0x00, 0x09, 0x00, 0x6f, 0x00),
5492 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
5493 },
5494 { .freq = 5480,
5495 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
5496 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
5497 0x84, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
5498 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
5499 0x00, 0x09, 0x00, 0x6f, 0x00),
5500 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
5501 },
5502 { .freq = 5490,
5503 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
5504 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
5505 0x83, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
5506 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
5507 0x00, 0x09, 0x00, 0x6f, 0x00),
5508 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
5509 },
5510 { .freq = 5500,
5511 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
5512 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
5513 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
5514 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
5515 0x00, 0x09, 0x00, 0x6f, 0x00),
5516 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
5517 },
5518 { .freq = 5510,
5519 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
5520 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
5521 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
5522 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
5523 0x00, 0x09, 0x00, 0x6f, 0x00),
5524 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
5525 },
5526 { .freq = 5520,
5527 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
5528 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
5529 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
5530 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
5531 0x00, 0x09, 0x00, 0x6f, 0x00),
5532 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
5533 },
5534 { .freq = 5530,
5535 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
5536 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
5537 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
5538 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
5539 0x00, 0x09, 0x00, 0x6f, 0x00),
5540 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
5541 },
5542 { .freq = 5540,
5543 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
5544 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
5545 0x71, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
5546 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
5547 0x00, 0x09, 0x00, 0x6f, 0x00),
5548 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
5549 },
5550 { .freq = 5550,
5551 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
5552 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
5553 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
5554 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
5555 0x00, 0x09, 0x00, 0x6f, 0x00),
5556 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
5557 },
5558 { .freq = 5560,
5559 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
5560 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
5561 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
5562 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
5563 0x00, 0x09, 0x00, 0x6f, 0x00),
5564 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
5565 },
5566 { .freq = 5570,
5567 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
5568 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
5569 0x61, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
5570 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
5571 0x00, 0x09, 0x00, 0x6f, 0x00),
5572 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
5573 },
5574 { .freq = 5580,
5575 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
5576 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
5577 0x60, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
5578 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
5579 0x00, 0x08, 0x00, 0x6f, 0x00),
5580 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
5581 },
5582 { .freq = 5590,
5583 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
5584 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
5585 0x50, 0x61, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
5586 0x00, 0x6f, 0x00, 0x61, 0x00, 0x00, 0x00, 0x77,
5587 0x00, 0x08, 0x00, 0x6f, 0x00),
5588 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
5589 },
5590 { .freq = 5600,
5591 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
5592 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
5593 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
5594 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
5595 0x00, 0x08, 0x00, 0x6f, 0x00),
5596 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
5597 },
5598 { .freq = 5610,
5599 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
5600 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
5601 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
5602 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
5603 0x00, 0x08, 0x00, 0x6f, 0x00),
5604 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
5605 },
5606 { .freq = 5620,
5607 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
5608 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
5609 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
5610 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
5611 0x00, 0x07, 0x00, 0x6f, 0x00),
5612 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
5613 },
5614 { .freq = 5630,
5615 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
5616 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
5617 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
5618 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
5619 0x00, 0x07, 0x00, 0x6f, 0x00),
5620 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
5621 },
5622 { .freq = 5640,
5623 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
5624 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
5625 0x40, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
5626 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
5627 0x00, 0x07, 0x00, 0x6f, 0x00),
5628 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
5629 },
5630 { .freq = 5650,
5631 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
5632 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
5633 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
5634 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
5635 0x00, 0x07, 0x00, 0x6f, 0x00),
5636 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
5637 },
5638 { .freq = 5660,
5639 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
5640 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
5641 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5642 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
5643 0x00, 0x06, 0x00, 0x6f, 0x00),
5644 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
5645 },
5646 { .freq = 5670,
5647 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
5648 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
5649 0x40, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5650 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
5651 0x00, 0x06, 0x00, 0x6f, 0x00),
5652 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
5653 },
5654 { .freq = 5680,
5655 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
5656 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
5657 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5658 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
5659 0x00, 0x06, 0x00, 0x6f, 0x00),
5660 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
5661 },
5662 { .freq = 5690,
5663 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
5664 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
5665 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5666 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
5667 0x00, 0x06, 0x00, 0x6f, 0x00),
5668 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
5669 },
5670 { .freq = 5700,
5671 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
5672 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
5673 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5674 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
5675 0x00, 0x06, 0x00, 0x6e, 0x00),
5676 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
5677 },
5678 { .freq = 5710,
5679 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
5680 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
5681 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5682 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
5683 0x00, 0x06, 0x00, 0x6e, 0x00),
5684 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
5685 },
5686 { .freq = 5720,
5687 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
5688 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
5689 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5690 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
5691 0x00, 0x06, 0x00, 0x6e, 0x00),
5692 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
5693 },
5694 { .freq = 5725,
5695 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
5696 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
5697 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5698 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
5699 0x00, 0x06, 0x00, 0x6e, 0x00),
5700 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
5701 },
5702 { .freq = 5730,
5703 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
5704 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
5705 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5706 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
5707 0x00, 0x06, 0x00, 0x6e, 0x00),
5708 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
5709 },
5710 { .freq = 5735,
5711 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
5712 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
5713 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5714 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
5715 0x00, 0x06, 0x00, 0x6d, 0x00),
5716 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
5717 },
5718 { .freq = 5740,
5719 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
5720 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
5721 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5722 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
5723 0x00, 0x06, 0x00, 0x6d, 0x00),
5724 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
5725 },
5726 { .freq = 5745,
5727 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
5728 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
5729 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
5730 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
5731 0x00, 0x06, 0x00, 0x6d, 0x00),
5732 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
5733 },
5734 { .freq = 5750,
5735 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
5736 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
5737 0x20, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5738 0x00, 0x6d, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
5739 0x00, 0x05, 0x00, 0x6d, 0x00),
5740 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
5741 },
5742 { .freq = 5755,
5743 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
5744 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
5745 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5746 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
5747 0x00, 0x05, 0x00, 0x6c, 0x00),
5748 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
5749 },
5750 { .freq = 5760,
5751 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
5752 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
5753 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5754 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
5755 0x00, 0x05, 0x00, 0x6c, 0x00),
5756 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
5757 },
5758 { .freq = 5765,
5759 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
5760 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
5761 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5762 0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
5763 0x00, 0x05, 0x00, 0x6c, 0x00),
5764 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
5765 },
5766 { .freq = 5770,
5767 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
5768 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
5769 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5770 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
5771 0x00, 0x05, 0x00, 0x6b, 0x00),
5772 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
5773 },
5774 { .freq = 5775,
5775 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
5776 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
5777 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5778 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
5779 0x00, 0x05, 0x00, 0x6b, 0x00),
5780 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
5781 },
5782 { .freq = 5780,
5783 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
5784 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
5785 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5786 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
5787 0x00, 0x05, 0x00, 0x6b, 0x00),
5788 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
5789 },
5790 { .freq = 5785,
5791 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
5792 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
5793 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5794 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
5795 0x00, 0x05, 0x00, 0x6b, 0x00),
5796 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
5797 },
5798 { .freq = 5790,
5799 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
5800 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
5801 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5802 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
5803 0x00, 0x05, 0x00, 0x6b, 0x00),
5804 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
5805 },
5806 { .freq = 5795,
5807 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
5808 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
5809 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5810 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5811 0x00, 0x05, 0x00, 0x6b, 0x00),
5812 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
5813 },
5814 { .freq = 5800,
5815 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
5816 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
5817 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5818 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5819 0x00, 0x05, 0x00, 0x6b, 0x00),
5820 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
5821 },
5822 { .freq = 5805,
5823 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
5824 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
5825 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5826 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5827 0x00, 0x05, 0x00, 0x6a, 0x00),
5828 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
5829 },
5830 { .freq = 5810,
5831 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
5832 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
5833 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5834 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5835 0x00, 0x05, 0x00, 0x6a, 0x00),
5836 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
5837 },
5838 { .freq = 5815,
5839 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
5840 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
5841 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5842 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5843 0x00, 0x05, 0x00, 0x6a, 0x00),
5844 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
5845 },
5846 { .freq = 5820,
5847 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
5848 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
5849 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5850 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5851 0x00, 0x05, 0x00, 0x6a, 0x00),
5852 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
5853 },
5854 { .freq = 5825,
5855 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
5856 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
5857 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5858 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5859 0x00, 0x05, 0x00, 0x69, 0x00),
5860 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
5861 },
5862 { .freq = 5830,
5863 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
5864 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
5865 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
5866 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5867 0x00, 0x05, 0x00, 0x69, 0x00),
5868 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
5869 },
5870 { .freq = 5840,
5871 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
5872 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
5873 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
5874 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5875 0x00, 0x04, 0x00, 0x69, 0x00),
5876 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
5877 },
5878 { .freq = 5850,
5879 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
5880 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
5881 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
5882 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5883 0x00, 0x04, 0x00, 0x69, 0x00),
5884 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
5885 },
5886 { .freq = 5860,
5887 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
5888 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
5889 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
5890 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5891 0x00, 0x04, 0x00, 0x69, 0x00),
5892 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
5893 },
5894 { .freq = 5870,
5895 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
5896 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
5897 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
5898 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5899 0x00, 0x04, 0x00, 0x68, 0x00),
5900 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
5901 },
5902 { .freq = 5880,
5903 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
5904 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
5905 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
5906 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5907 0x00, 0x04, 0x00, 0x68, 0x00),
5908 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
5909 },
5910 { .freq = 5890,
5911 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
5912 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
5913 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
5914 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5915 0x00, 0x04, 0x00, 0x68, 0x00),
5916 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
5917 },
5918 { .freq = 5900,
5919 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
5920 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
5921 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
5922 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5923 0x00, 0x04, 0x00, 0x68, 0x00),
5924 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
5925 },
5926 { .freq = 5910,
5927 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
5928 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
5929 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
5930 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
5931 0x00, 0x04, 0x00, 0x68, 0x00),
5932 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
5933 },
5934 { .freq = 2412,
5935 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
5936 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
5937 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
5938 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
5939 0x70, 0x00, 0x0b, 0x00, 0x0a),
5940 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
5941 },
5942 { .freq = 2417,
5943 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
5944 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5945 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
5946 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
5947 0x70, 0x00, 0x0b, 0x00, 0x0a),
5948 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
5949 },
5950 { .freq = 2422,
5951 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
5952 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5953 0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
5954 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
5955 0x70, 0x00, 0x0b, 0x00, 0x0a),
5956 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
5957 },
5958 { .freq = 2427,
5959 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
5960 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5961 0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
5962 0x0a, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
5963 0x70, 0x00, 0x0a, 0x00, 0x0a),
5964 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
5965 },
5966 { .freq = 2432,
5967 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
5968 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5969 0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
5970 0x0a, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
5971 0x70, 0x00, 0x0a, 0x00, 0x0a),
5972 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
5973 },
5974 { .freq = 2437,
5975 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
5976 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5977 0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
5978 0x0a, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
5979 0x70, 0x00, 0x0a, 0x00, 0x0a),
5980 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
5981 },
5982 { .freq = 2442,
5983 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
5984 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5985 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
5986 0x0a, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x02, 0x00,
5987 0x70, 0x00, 0x0a, 0x00, 0x0a),
5988 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
5989 },
5990 { .freq = 2447,
5991 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
5992 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
5993 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
5994 0x0a, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
5995 0x70, 0x00, 0x0a, 0x00, 0x09),
5996 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
5997 },
5998 { .freq = 2452,
5999 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
6000 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
6001 0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
6002 0x0a, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
6003 0x70, 0x00, 0x0a, 0x00, 0x09),
6004 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
6005 },
6006 { .freq = 2457,
6007 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
6008 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
6009 0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
6010 0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
6011 0x70, 0x00, 0x0a, 0x00, 0x09),
6012 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
6013 },
6014 { .freq = 2462,
6015 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
6016 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
6017 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
6018 0x09, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
6019 0x70, 0x00, 0x09, 0x00, 0x09),
6020 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
6021 },
6022 { .freq = 2467,
6023 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
6024 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
6025 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
6026 0x09, 0x00, 0x09, 0x00, 0x22, 0x00, 0x02, 0x00,
6027 0x70, 0x00, 0x09, 0x00, 0x09),
6028 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
6029 },
6030 { .freq = 2472,
6031 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
6032 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
6033 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
6034 0x09, 0x00, 0x09, 0x00, 0x11, 0x00, 0x02, 0x00,
6035 0x70, 0x00, 0x09, 0x00, 0x09),
6036 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
6037 },
6038 { .freq = 2484,
6039 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
6040 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
6041 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
6042 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
6043 0x70, 0x00, 0x09, 0x00, 0x09),
6044 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
6045 },
28}; 6046};
29 6047
6048/* TODO: add support for rev4+ devices by searching in rev4+ tables */
30const struct b43_nphy_channeltab_entry_rev3 * 6049const struct b43_nphy_channeltab_entry_rev3 *
31b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq) 6050b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
32{ 6051{
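A minimal sketch of the lookup that the declaration above introduces, assuming a plain linear search keyed on the .freq member of the rev3 table; only the function signature is visible in this hunk, so the table symbol name "b43_nphy_channeltab_rev3" and the helper name below are placeholders.

	/* Hypothetical sketch: walk the rev3 channel table and return the entry
	 * whose .freq matches the requested frequency, or NULL if none does.
	 * The table name is assumed; it is not shown in this hunk. */
	static const struct b43_nphy_channeltab_entry_rev3 *
	chantab_rev3_lookup_sketch(u16 freq)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab_rev3); i++) {
			const struct b43_nphy_channeltab_entry_rev3 *e =
				&b43_nphy_channeltab_rev3[i];

			if (e->freq == freq)
				return e;	/* exact match on channel frequency */
		}
		return NULL;			/* frequency not present in the table */
	}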
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
index fda6dafecb8..302600c0afa 100644
--- a/drivers/net/wireless/b43/radio_2056.h
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -4,6 +4,9 @@
4 4
5 Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com> 5 Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
6 6
7 Some parts of the code in this file are derived from the brcm80211
8 driver Copyright (c) 2010 Broadcom Corporation
9
7 This program is free software; you can redistribute it and/or modify 10 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by 11 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or 12 the Free Software Foundation; either version 2 of the License, or
@@ -28,13 +31,1085 @@
28 31
29#include "tables_nphy.h" 32#include "tables_nphy.h"
30 33
34#define B2056_SYN (0x0 << 12)
35#define B2056_TX0 (0x2 << 12)
36#define B2056_TX1 (0x3 << 12)
37#define B2056_RX0 (0x6 << 12)
38#define B2056_RX1 (0x7 << 12)
39#define B2056_ALLTX (0xE << 12)
40#define B2056_ALLRX (0xF << 12)
41
42#define B2056_SYN_RESERVED_ADDR0 0x00
43#define B2056_SYN_IDCODE 0x01
44#define B2056_SYN_RESERVED_ADDR2 0x02
45#define B2056_SYN_RESERVED_ADDR3 0x03
46#define B2056_SYN_RESERVED_ADDR4 0x04
47#define B2056_SYN_RESERVED_ADDR5 0x05
48#define B2056_SYN_RESERVED_ADDR6 0x06
49#define B2056_SYN_RESERVED_ADDR7 0x07
50#define B2056_SYN_COM_CTRL 0x08
51#define B2056_SYN_COM_PU 0x09
52#define B2056_SYN_COM_OVR 0x0A
53#define B2056_SYN_COM_RESET 0x0B
54#define B2056_SYN_COM_RCAL 0x0C
55#define B2056_SYN_COM_RC_RXLPF 0x0D
56#define B2056_SYN_COM_RC_TXLPF 0x0E
57#define B2056_SYN_COM_RC_RXHPF 0x0F
58#define B2056_SYN_RESERVED_ADDR16 0x10
59#define B2056_SYN_RESERVED_ADDR17 0x11
60#define B2056_SYN_RESERVED_ADDR18 0x12
61#define B2056_SYN_RESERVED_ADDR19 0x13
62#define B2056_SYN_RESERVED_ADDR20 0x14
63#define B2056_SYN_RESERVED_ADDR21 0x15
64#define B2056_SYN_RESERVED_ADDR22 0x16
65#define B2056_SYN_RESERVED_ADDR23 0x17
66#define B2056_SYN_RESERVED_ADDR24 0x18
67#define B2056_SYN_RESERVED_ADDR25 0x19
68#define B2056_SYN_RESERVED_ADDR26 0x1A
69#define B2056_SYN_RESERVED_ADDR27 0x1B
70#define B2056_SYN_RESERVED_ADDR28 0x1C
71#define B2056_SYN_RESERVED_ADDR29 0x1D
72#define B2056_SYN_RESERVED_ADDR30 0x1E
73#define B2056_SYN_RESERVED_ADDR31 0x1F
74#define B2056_SYN_GPIO_MASTER1 0x20
75#define B2056_SYN_GPIO_MASTER2 0x21
76#define B2056_SYN_TOPBIAS_MASTER 0x22
77#define B2056_SYN_TOPBIAS_RCAL 0x23
78#define B2056_SYN_AFEREG 0x24
79#define B2056_SYN_TEMPPROCSENSE 0x25
80#define B2056_SYN_TEMPPROCSENSEIDAC 0x26
81#define B2056_SYN_TEMPPROCSENSERCAL 0x27
82#define B2056_SYN_LPO 0x28
83#define B2056_SYN_VDDCAL_MASTER 0x29
84#define B2056_SYN_VDDCAL_IDAC 0x2A
85#define B2056_SYN_VDDCAL_STATUS 0x2B
86#define B2056_SYN_RCAL_MASTER 0x2C
87#define B2056_SYN_RCAL_CODE_OUT 0x2D
88#define B2056_SYN_RCCAL_CTRL0 0x2E
89#define B2056_SYN_RCCAL_CTRL1 0x2F
90#define B2056_SYN_RCCAL_CTRL2 0x30
91#define B2056_SYN_RCCAL_CTRL3 0x31
92#define B2056_SYN_RCCAL_CTRL4 0x32
93#define B2056_SYN_RCCAL_CTRL5 0x33
94#define B2056_SYN_RCCAL_CTRL6 0x34
95#define B2056_SYN_RCCAL_CTRL7 0x35
96#define B2056_SYN_RCCAL_CTRL8 0x36
97#define B2056_SYN_RCCAL_CTRL9 0x37
98#define B2056_SYN_RCCAL_CTRL10 0x38
99#define B2056_SYN_RCCAL_CTRL11 0x39
100#define B2056_SYN_ZCAL_SPARE1 0x3A
101#define B2056_SYN_ZCAL_SPARE2 0x3B
102#define B2056_SYN_PLL_MAST1 0x3C
103#define B2056_SYN_PLL_MAST2 0x3D
104#define B2056_SYN_PLL_MAST3 0x3E
105#define B2056_SYN_PLL_BIAS_RESET 0x3F
106#define B2056_SYN_PLL_XTAL0 0x40
107#define B2056_SYN_PLL_XTAL1 0x41
108#define B2056_SYN_PLL_XTAL3 0x42
109#define B2056_SYN_PLL_XTAL4 0x43
110#define B2056_SYN_PLL_XTAL5 0x44
111#define B2056_SYN_PLL_XTAL6 0x45
112#define B2056_SYN_PLL_REFDIV 0x46
113#define B2056_SYN_PLL_PFD 0x47
114#define B2056_SYN_PLL_CP1 0x48
115#define B2056_SYN_PLL_CP2 0x49
116#define B2056_SYN_PLL_CP3 0x4A
117#define B2056_SYN_PLL_LOOPFILTER1 0x4B
118#define B2056_SYN_PLL_LOOPFILTER2 0x4C
119#define B2056_SYN_PLL_LOOPFILTER3 0x4D
120#define B2056_SYN_PLL_LOOPFILTER4 0x4E
121#define B2056_SYN_PLL_LOOPFILTER5 0x4F
122#define B2056_SYN_PLL_MMD1 0x50
123#define B2056_SYN_PLL_MMD2 0x51
124#define B2056_SYN_PLL_VCO1 0x52
125#define B2056_SYN_PLL_VCO2 0x53
126#define B2056_SYN_PLL_MONITOR1 0x54
127#define B2056_SYN_PLL_MONITOR2 0x55
128#define B2056_SYN_PLL_VCOCAL1 0x56
129#define B2056_SYN_PLL_VCOCAL2 0x57
130#define B2056_SYN_PLL_VCOCAL4 0x58
131#define B2056_SYN_PLL_VCOCAL5 0x59
132#define B2056_SYN_PLL_VCOCAL6 0x5A
133#define B2056_SYN_PLL_VCOCAL7 0x5B
134#define B2056_SYN_PLL_VCOCAL8 0x5C
135#define B2056_SYN_PLL_VCOCAL9 0x5D
136#define B2056_SYN_PLL_VCOCAL10 0x5E
137#define B2056_SYN_PLL_VCOCAL11 0x5F
138#define B2056_SYN_PLL_VCOCAL12 0x60
139#define B2056_SYN_PLL_VCOCAL13 0x61
140#define B2056_SYN_PLL_VREG 0x62
141#define B2056_SYN_PLL_STATUS1 0x63
142#define B2056_SYN_PLL_STATUS2 0x64
143#define B2056_SYN_PLL_STATUS3 0x65
144#define B2056_SYN_LOGEN_PU0 0x66
145#define B2056_SYN_LOGEN_PU1 0x67
146#define B2056_SYN_LOGEN_PU2 0x68
147#define B2056_SYN_LOGEN_PU3 0x69
148#define B2056_SYN_LOGEN_PU5 0x6A
149#define B2056_SYN_LOGEN_PU6 0x6B
150#define B2056_SYN_LOGEN_PU7 0x6C
151#define B2056_SYN_LOGEN_PU8 0x6D
152#define B2056_SYN_LOGEN_BIAS_RESET 0x6E
153#define B2056_SYN_LOGEN_RCCR1 0x6F
154#define B2056_SYN_LOGEN_VCOBUF1 0x70
155#define B2056_SYN_LOGEN_MIXER1 0x71
156#define B2056_SYN_LOGEN_MIXER2 0x72
157#define B2056_SYN_LOGEN_BUF1 0x73
158#define B2056_SYN_LOGENBUF2 0x74
159#define B2056_SYN_LOGEN_BUF3 0x75
160#define B2056_SYN_LOGEN_BUF4 0x76
161#define B2056_SYN_LOGEN_DIV1 0x77
162#define B2056_SYN_LOGEN_DIV2 0x78
163#define B2056_SYN_LOGEN_DIV3 0x79
164#define B2056_SYN_LOGEN_ACL1 0x7A
165#define B2056_SYN_LOGEN_ACL2 0x7B
166#define B2056_SYN_LOGEN_ACL3 0x7C
167#define B2056_SYN_LOGEN_ACL4 0x7D
168#define B2056_SYN_LOGEN_ACL5 0x7E
169#define B2056_SYN_LOGEN_ACL6 0x7F
170#define B2056_SYN_LOGEN_ACLOUT 0x80
171#define B2056_SYN_LOGEN_ACLCAL1 0x81
172#define B2056_SYN_LOGEN_ACLCAL2 0x82
173#define B2056_SYN_LOGEN_ACLCAL3 0x83
174#define B2056_SYN_CALEN 0x84
175#define B2056_SYN_LOGEN_PEAKDET1 0x85
176#define B2056_SYN_LOGEN_CORE_ACL_OVR 0x86
177#define B2056_SYN_LOGEN_RX_DIFF_ACL_OVR 0x87
178#define B2056_SYN_LOGEN_TX_DIFF_ACL_OVR 0x88
179#define B2056_SYN_LOGEN_RX_CMOS_ACL_OVR 0x89
180#define B2056_SYN_LOGEN_TX_CMOS_ACL_OVR 0x8A
181#define B2056_SYN_LOGEN_VCOBUF2 0x8B
182#define B2056_SYN_LOGEN_MIXER3 0x8C
183#define B2056_SYN_LOGEN_BUF5 0x8D
184#define B2056_SYN_LOGEN_BUF6 0x8E
185#define B2056_SYN_LOGEN_CBUFRX1 0x8F
186#define B2056_SYN_LOGEN_CBUFRX2 0x90
187#define B2056_SYN_LOGEN_CBUFRX3 0x91
188#define B2056_SYN_LOGEN_CBUFRX4 0x92
189#define B2056_SYN_LOGEN_CBUFTX1 0x93
190#define B2056_SYN_LOGEN_CBUFTX2 0x94
191#define B2056_SYN_LOGEN_CBUFTX3 0x95
192#define B2056_SYN_LOGEN_CBUFTX4 0x96
193#define B2056_SYN_LOGEN_CMOSRX1 0x97
194#define B2056_SYN_LOGEN_CMOSRX2 0x98
195#define B2056_SYN_LOGEN_CMOSRX3 0x99
196#define B2056_SYN_LOGEN_CMOSRX4 0x9A
197#define B2056_SYN_LOGEN_CMOSTX1 0x9B
198#define B2056_SYN_LOGEN_CMOSTX2 0x9C
199#define B2056_SYN_LOGEN_CMOSTX3 0x9D
200#define B2056_SYN_LOGEN_CMOSTX4 0x9E
201#define B2056_SYN_LOGEN_VCOBUF2_OVRVAL 0x9F
202#define B2056_SYN_LOGEN_MIXER3_OVRVAL 0xA0
203#define B2056_SYN_LOGEN_BUF5_OVRVAL 0xA1
204#define B2056_SYN_LOGEN_BUF6_OVRVAL 0xA2
205#define B2056_SYN_LOGEN_CBUFRX1_OVRVAL 0xA3
206#define B2056_SYN_LOGEN_CBUFRX2_OVRVAL 0xA4
207#define B2056_SYN_LOGEN_CBUFRX3_OVRVAL 0xA5
208#define B2056_SYN_LOGEN_CBUFRX4_OVRVAL 0xA6
209#define B2056_SYN_LOGEN_CBUFTX1_OVRVAL 0xA7
210#define B2056_SYN_LOGEN_CBUFTX2_OVRVAL 0xA8
211#define B2056_SYN_LOGEN_CBUFTX3_OVRVAL 0xA9
212#define B2056_SYN_LOGEN_CBUFTX4_OVRVAL 0xAA
213#define B2056_SYN_LOGEN_CMOSRX1_OVRVAL 0xAB
214#define B2056_SYN_LOGEN_CMOSRX2_OVRVAL 0xAC
215#define B2056_SYN_LOGEN_CMOSRX3_OVRVAL 0xAD
216#define B2056_SYN_LOGEN_CMOSRX4_OVRVAL 0xAE
217#define B2056_SYN_LOGEN_CMOSTX1_OVRVAL 0xAF
218#define B2056_SYN_LOGEN_CMOSTX2_OVRVAL 0xB0
219#define B2056_SYN_LOGEN_CMOSTX3_OVRVAL 0xB1
220#define B2056_SYN_LOGEN_CMOSTX4_OVRVAL 0xB2
221#define B2056_SYN_LOGEN_ACL_WAITCNT 0xB3
222#define B2056_SYN_LOGEN_CORE_CALVALID 0xB4
223#define B2056_SYN_LOGEN_RX_CMOS_CALVALID 0xB5
224#define B2056_SYN_LOGEN_TX_CMOS_VALID 0xB6
225
226#define B2056_TX_RESERVED_ADDR0 0x00
227#define B2056_TX_IDCODE 0x01
228#define B2056_TX_RESERVED_ADDR2 0x02
229#define B2056_TX_RESERVED_ADDR3 0x03
230#define B2056_TX_RESERVED_ADDR4 0x04
231#define B2056_TX_RESERVED_ADDR5 0x05
232#define B2056_TX_RESERVED_ADDR6 0x06
233#define B2056_TX_RESERVED_ADDR7 0x07
234#define B2056_TX_COM_CTRL 0x08
235#define B2056_TX_COM_PU 0x09
236#define B2056_TX_COM_OVR 0x0A
237#define B2056_TX_COM_RESET 0x0B
238#define B2056_TX_COM_RCAL 0x0C
239#define B2056_TX_COM_RC_RXLPF 0x0D
240#define B2056_TX_COM_RC_TXLPF 0x0E
241#define B2056_TX_COM_RC_RXHPF 0x0F
242#define B2056_TX_RESERVED_ADDR16 0x10
243#define B2056_TX_RESERVED_ADDR17 0x11
244#define B2056_TX_RESERVED_ADDR18 0x12
245#define B2056_TX_RESERVED_ADDR19 0x13
246#define B2056_TX_RESERVED_ADDR20 0x14
247#define B2056_TX_RESERVED_ADDR21 0x15
248#define B2056_TX_RESERVED_ADDR22 0x16
249#define B2056_TX_RESERVED_ADDR23 0x17
250#define B2056_TX_RESERVED_ADDR24 0x18
251#define B2056_TX_RESERVED_ADDR25 0x19
252#define B2056_TX_RESERVED_ADDR26 0x1A
253#define B2056_TX_RESERVED_ADDR27 0x1B
254#define B2056_TX_RESERVED_ADDR28 0x1C
255#define B2056_TX_RESERVED_ADDR29 0x1D
256#define B2056_TX_RESERVED_ADDR30 0x1E
257#define B2056_TX_RESERVED_ADDR31 0x1F
258#define B2056_TX_IQCAL_GAIN_BW 0x20
259#define B2056_TX_LOFT_FINE_I 0x21
260#define B2056_TX_LOFT_FINE_Q 0x22
261#define B2056_TX_LOFT_COARSE_I 0x23
262#define B2056_TX_LOFT_COARSE_Q 0x24
263#define B2056_TX_TX_COM_MASTER1 0x25
264#define B2056_TX_TX_COM_MASTER2 0x26
265#define B2056_TX_RXIQCAL_TXMUX 0x27
266#define B2056_TX_TX_SSI_MASTER 0x28
267#define B2056_TX_IQCAL_VCM_HG 0x29
268#define B2056_TX_IQCAL_IDAC 0x2A
269#define B2056_TX_TSSI_VCM 0x2B
270#define B2056_TX_TX_AMP_DET 0x2C
271#define B2056_TX_TX_SSI_MUX 0x2D
272#define B2056_TX_TSSIA 0x2E
273#define B2056_TX_TSSIG 0x2F
274#define B2056_TX_TSSI_MISC1 0x30
275#define B2056_TX_TSSI_MISC2 0x31
276#define B2056_TX_TSSI_MISC3 0x32
277#define B2056_TX_PA_SPARE1 0x33
278#define B2056_TX_PA_SPARE2 0x34
279#define B2056_TX_INTPAA_MASTER 0x35
280#define B2056_TX_INTPAA_GAIN 0x36
281#define B2056_TX_INTPAA_BOOST_TUNE 0x37
282#define B2056_TX_INTPAA_IAUX_STAT 0x38
283#define B2056_TX_INTPAA_IAUX_DYN 0x39
284#define B2056_TX_INTPAA_IMAIN_STAT 0x3A
285#define B2056_TX_INTPAA_IMAIN_DYN 0x3B
286#define B2056_TX_INTPAA_CASCBIAS 0x3C
287#define B2056_TX_INTPAA_PASLOPE 0x3D
288#define B2056_TX_INTPAA_PA_MISC 0x3E
289#define B2056_TX_INTPAG_MASTER 0x3F
290#define B2056_TX_INTPAG_GAIN 0x40
291#define B2056_TX_INTPAG_BOOST_TUNE 0x41
292#define B2056_TX_INTPAG_IAUX_STAT 0x42
293#define B2056_TX_INTPAG_IAUX_DYN 0x43
294#define B2056_TX_INTPAG_IMAIN_STAT 0x44
295#define B2056_TX_INTPAG_IMAIN_DYN 0x45
296#define B2056_TX_INTPAG_CASCBIAS 0x46
297#define B2056_TX_INTPAG_PASLOPE 0x47
298#define B2056_TX_INTPAG_PA_MISC 0x48
299#define B2056_TX_PADA_MASTER 0x49
300#define B2056_TX_PADA_IDAC 0x4A
301#define B2056_TX_PADA_CASCBIAS 0x4B
302#define B2056_TX_PADA_GAIN 0x4C
303#define B2056_TX_PADA_BOOST_TUNE 0x4D
304#define B2056_TX_PADA_SLOPE 0x4E
305#define B2056_TX_PADG_MASTER 0x4F
306#define B2056_TX_PADG_IDAC 0x50
307#define B2056_TX_PADG_CASCBIAS 0x51
308#define B2056_TX_PADG_GAIN 0x52
309#define B2056_TX_PADG_BOOST_TUNE 0x53
310#define B2056_TX_PADG_SLOPE 0x54
311#define B2056_TX_PGAA_MASTER 0x55
312#define B2056_TX_PGAA_IDAC 0x56
313#define B2056_TX_PGAA_GAIN 0x57
314#define B2056_TX_PGAA_BOOST_TUNE 0x58
315#define B2056_TX_PGAA_SLOPE 0x59
316#define B2056_TX_PGAA_MISC 0x5A
317#define B2056_TX_PGAG_MASTER 0x5B
318#define B2056_TX_PGAG_IDAC 0x5C
319#define B2056_TX_PGAG_GAIN 0x5D
320#define B2056_TX_PGAG_BOOST_TUNE 0x5E
321#define B2056_TX_PGAG_SLOPE 0x5F
322#define B2056_TX_PGAG_MISC 0x60
323#define B2056_TX_MIXA_MASTER 0x61
324#define B2056_TX_MIXA_BOOST_TUNE 0x62
325#define B2056_TX_MIXG 0x63
326#define B2056_TX_MIXG_BOOST_TUNE 0x64
327#define B2056_TX_BB_GM_MASTER 0x65
328#define B2056_TX_GMBB_GM 0x66
329#define B2056_TX_GMBB_IDAC 0x67
330#define B2056_TX_TXLPF_MASTER 0x68
331#define B2056_TX_TXLPF_RCCAL 0x69
332#define B2056_TX_TXLPF_RCCAL_OFF0 0x6A
333#define B2056_TX_TXLPF_RCCAL_OFF1 0x6B
334#define B2056_TX_TXLPF_RCCAL_OFF2 0x6C
335#define B2056_TX_TXLPF_RCCAL_OFF3 0x6D
336#define B2056_TX_TXLPF_RCCAL_OFF4 0x6E
337#define B2056_TX_TXLPF_RCCAL_OFF5 0x6F
338#define B2056_TX_TXLPF_RCCAL_OFF6 0x70
339#define B2056_TX_TXLPF_BW 0x71
340#define B2056_TX_TXLPF_GAIN 0x72
341#define B2056_TX_TXLPF_IDAC 0x73
342#define B2056_TX_TXLPF_IDAC_0 0x74
343#define B2056_TX_TXLPF_IDAC_1 0x75
344#define B2056_TX_TXLPF_IDAC_2 0x76
345#define B2056_TX_TXLPF_IDAC_3 0x77
346#define B2056_TX_TXLPF_IDAC_4 0x78
347#define B2056_TX_TXLPF_IDAC_5 0x79
348#define B2056_TX_TXLPF_IDAC_6 0x7A
349#define B2056_TX_TXLPF_OPAMP_IDAC 0x7B
350#define B2056_TX_TXLPF_MISC 0x7C
351#define B2056_TX_TXSPARE1 0x7D
352#define B2056_TX_TXSPARE2 0x7E
353#define B2056_TX_TXSPARE3 0x7F
354#define B2056_TX_TXSPARE4 0x80
355#define B2056_TX_TXSPARE5 0x81
356#define B2056_TX_TXSPARE6 0x82
357#define B2056_TX_TXSPARE7 0x83
358#define B2056_TX_TXSPARE8 0x84
359#define B2056_TX_TXSPARE9 0x85
360#define B2056_TX_TXSPARE10 0x86
361#define B2056_TX_TXSPARE11 0x87
362#define B2056_TX_TXSPARE12 0x88
363#define B2056_TX_TXSPARE13 0x89
364#define B2056_TX_TXSPARE14 0x8A
365#define B2056_TX_TXSPARE15 0x8B
366#define B2056_TX_TXSPARE16 0x8C
367#define B2056_TX_STATUS_INTPA_GAIN 0x8D
368#define B2056_TX_STATUS_PAD_GAIN 0x8E
369#define B2056_TX_STATUS_PGA_GAIN 0x8F
370#define B2056_TX_STATUS_GM_TXLPF_GAIN 0x90
371#define B2056_TX_STATUS_TXLPF_BW 0x91
372#define B2056_TX_STATUS_TXLPF_RC 0x92
373#define B2056_TX_GMBB_IDAC0 0x93
374#define B2056_TX_GMBB_IDAC1 0x94
375#define B2056_TX_GMBB_IDAC2 0x95
376#define B2056_TX_GMBB_IDAC3 0x96
377#define B2056_TX_GMBB_IDAC4 0x97
378#define B2056_TX_GMBB_IDAC5 0x98
379#define B2056_TX_GMBB_IDAC6 0x99
380#define B2056_TX_GMBB_IDAC7 0x9A
381
382#define B2056_RX_RESERVED_ADDR0 0x00
383#define B2056_RX_IDCODE 0x01
384#define B2056_RX_RESERVED_ADDR2 0x02
385#define B2056_RX_RESERVED_ADDR3 0x03
386#define B2056_RX_RESERVED_ADDR4 0x04
387#define B2056_RX_RESERVED_ADDR5 0x05
388#define B2056_RX_RESERVED_ADDR6 0x06
389#define B2056_RX_RESERVED_ADDR7 0x07
390#define B2056_RX_COM_CTRL 0x08
391#define B2056_RX_COM_PU 0x09
392#define B2056_RX_COM_OVR 0x0A
393#define B2056_RX_COM_RESET 0x0B
394#define B2056_RX_COM_RCAL 0x0C
395#define B2056_RX_COM_RC_RXLPF 0x0D
396#define B2056_RX_COM_RC_TXLPF 0x0E
397#define B2056_RX_COM_RC_RXHPF 0x0F
398#define B2056_RX_RESERVED_ADDR16 0x10
399#define B2056_RX_RESERVED_ADDR17 0x11
400#define B2056_RX_RESERVED_ADDR18 0x12
401#define B2056_RX_RESERVED_ADDR19 0x13
402#define B2056_RX_RESERVED_ADDR20 0x14
403#define B2056_RX_RESERVED_ADDR21 0x15
404#define B2056_RX_RESERVED_ADDR22 0x16
405#define B2056_RX_RESERVED_ADDR23 0x17
406#define B2056_RX_RESERVED_ADDR24 0x18
407#define B2056_RX_RESERVED_ADDR25 0x19
408#define B2056_RX_RESERVED_ADDR26 0x1A
409#define B2056_RX_RESERVED_ADDR27 0x1B
410#define B2056_RX_RESERVED_ADDR28 0x1C
411#define B2056_RX_RESERVED_ADDR29 0x1D
412#define B2056_RX_RESERVED_ADDR30 0x1E
413#define B2056_RX_RESERVED_ADDR31 0x1F
414#define B2056_RX_RXIQCAL_RXMUX 0x20
415#define B2056_RX_RSSI_PU 0x21
416#define B2056_RX_RSSI_SEL 0x22
417#define B2056_RX_RSSI_GAIN 0x23
418#define B2056_RX_RSSI_NB_IDAC 0x24
419#define B2056_RX_RSSI_WB2I_IDAC_1 0x25
420#define B2056_RX_RSSI_WB2I_IDAC_2 0x26
421#define B2056_RX_RSSI_WB2Q_IDAC_1 0x27
422#define B2056_RX_RSSI_WB2Q_IDAC_2 0x28
423#define B2056_RX_RSSI_POLE 0x29
424#define B2056_RX_RSSI_WB1_IDAC 0x2A
425#define B2056_RX_RSSI_MISC 0x2B
426#define B2056_RX_LNAA_MASTER 0x2C
427#define B2056_RX_LNAA_TUNE 0x2D
428#define B2056_RX_LNAA_GAIN 0x2E
429#define B2056_RX_LNA_A_SLOPE 0x2F
430#define B2056_RX_BIASPOLE_LNAA1_IDAC 0x30
431#define B2056_RX_LNAA2_IDAC 0x31
432#define B2056_RX_LNA1A_MISC 0x32
433#define B2056_RX_LNAG_MASTER 0x33
434#define B2056_RX_LNAG_TUNE 0x34
435#define B2056_RX_LNAG_GAIN 0x35
436#define B2056_RX_LNA_G_SLOPE 0x36
437#define B2056_RX_BIASPOLE_LNAG1_IDAC 0x37
438#define B2056_RX_LNAG2_IDAC 0x38
439#define B2056_RX_LNA1G_MISC 0x39
440#define B2056_RX_MIXA_MASTER 0x3A
441#define B2056_RX_MIXA_VCM 0x3B
442#define B2056_RX_MIXA_CTRLPTAT 0x3C
443#define B2056_RX_MIXA_LOB_BIAS 0x3D
444#define B2056_RX_MIXA_CORE_IDAC 0x3E
445#define B2056_RX_MIXA_CMFB_IDAC 0x3F
446#define B2056_RX_MIXA_BIAS_AUX 0x40
447#define B2056_RX_MIXA_BIAS_MAIN 0x41
448#define B2056_RX_MIXA_BIAS_MISC 0x42
449#define B2056_RX_MIXA_MAST_BIAS 0x43
450#define B2056_RX_MIXG_MASTER 0x44
451#define B2056_RX_MIXG_VCM 0x45
452#define B2056_RX_MIXG_CTRLPTAT 0x46
453#define B2056_RX_MIXG_LOB_BIAS 0x47
454#define B2056_RX_MIXG_CORE_IDAC 0x48
455#define B2056_RX_MIXG_CMFB_IDAC 0x49
456#define B2056_RX_MIXG_BIAS_AUX 0x4A
457#define B2056_RX_MIXG_BIAS_MAIN 0x4B
458#define B2056_RX_MIXG_BIAS_MISC 0x4C
459#define B2056_RX_MIXG_MAST_BIAS 0x4D
460#define B2056_RX_TIA_MASTER 0x4E
461#define B2056_RX_TIA_IOPAMP 0x4F
462#define B2056_RX_TIA_QOPAMP 0x50
463#define B2056_RX_TIA_IMISC 0x51
464#define B2056_RX_TIA_QMISC 0x52
465#define B2056_RX_TIA_GAIN 0x53
466#define B2056_RX_TIA_SPARE1 0x54
467#define B2056_RX_TIA_SPARE2 0x55
468#define B2056_RX_BB_LPF_MASTER 0x56
469#define B2056_RX_AACI_MASTER 0x57
470#define B2056_RX_RXLPF_IDAC 0x58
471#define B2056_RX_RXLPF_OPAMPBIAS_LOWQ 0x59
472#define B2056_RX_RXLPF_OPAMPBIAS_HIGHQ 0x5A
473#define B2056_RX_RXLPF_BIAS_DCCANCEL 0x5B
474#define B2056_RX_RXLPF_OUTVCM 0x5C
475#define B2056_RX_RXLPF_INVCM_BODY 0x5D
476#define B2056_RX_RXLPF_CC_OP 0x5E
477#define B2056_RX_RXLPF_GAIN 0x5F
478#define B2056_RX_RXLPF_Q_BW 0x60
479#define B2056_RX_RXLPF_HP_CORNER_BW 0x61
480#define B2056_RX_RXLPF_RCCAL_HPC 0x62
481#define B2056_RX_RXHPF_OFF0 0x63
482#define B2056_RX_RXHPF_OFF1 0x64
483#define B2056_RX_RXHPF_OFF2 0x65
484#define B2056_RX_RXHPF_OFF3 0x66
485#define B2056_RX_RXHPF_OFF4 0x67
486#define B2056_RX_RXHPF_OFF5 0x68
487#define B2056_RX_RXHPF_OFF6 0x69
488#define B2056_RX_RXHPF_OFF7 0x6A
489#define B2056_RX_RXLPF_RCCAL_LPC 0x6B
490#define B2056_RX_RXLPF_OFF_0 0x6C
491#define B2056_RX_RXLPF_OFF_1 0x6D
492#define B2056_RX_RXLPF_OFF_2 0x6E
493#define B2056_RX_RXLPF_OFF_3 0x6F
494#define B2056_RX_RXLPF_OFF_4 0x70
495#define B2056_RX_UNUSED 0x71
496#define B2056_RX_VGA_MASTER 0x72
497#define B2056_RX_VGA_BIAS 0x73
498#define B2056_RX_VGA_BIAS_DCCANCEL 0x74
499#define B2056_RX_VGA_GAIN 0x75
500#define B2056_RX_VGA_HP_CORNER_BW 0x76
501#define B2056_RX_VGABUF_BIAS 0x77
502#define B2056_RX_VGABUF_GAIN_BW 0x78
503#define B2056_RX_TXFBMIX_A 0x79
504#define B2056_RX_TXFBMIX_G 0x7A
505#define B2056_RX_RXSPARE1 0x7B
506#define B2056_RX_RXSPARE2 0x7C
507#define B2056_RX_RXSPARE3 0x7D
508#define B2056_RX_RXSPARE4 0x7E
509#define B2056_RX_RXSPARE5 0x7F
510#define B2056_RX_RXSPARE6 0x80
511#define B2056_RX_RXSPARE7 0x81
512#define B2056_RX_RXSPARE8 0x82
513#define B2056_RX_RXSPARE9 0x83
514#define B2056_RX_RXSPARE10 0x84
515#define B2056_RX_RXSPARE11 0x85
516#define B2056_RX_RXSPARE12 0x86
517#define B2056_RX_RXSPARE13 0x87
518#define B2056_RX_RXSPARE14 0x88
519#define B2056_RX_RXSPARE15 0x89
520#define B2056_RX_RXSPARE16 0x8A
521#define B2056_RX_STATUS_LNAA_GAIN 0x8B
522#define B2056_RX_STATUS_LNAG_GAIN 0x8C
523#define B2056_RX_STATUS_MIXTIA_GAIN 0x8D
524#define B2056_RX_STATUS_RXLPF_GAIN 0x8E
525#define B2056_RX_STATUS_VGA_BUF_GAIN 0x8F
526#define B2056_RX_STATUS_RXLPF_Q 0x90
527#define B2056_RX_STATUS_RXLPF_BUF_BW 0x91
528#define B2056_RX_STATUS_RXLPF_VGA_HPC 0x92
529#define B2056_RX_STATUS_RXLPF_RC 0x93
530#define B2056_RX_STATUS_HPC_RC 0x94
531
532#define B2056_LNA1_A_PU 0x01
533#define B2056_LNA2_A_PU 0x02
534#define B2056_LNA1_G_PU 0x01
535#define B2056_LNA2_G_PU 0x02
536#define B2056_MIXA_PU_I 0x01
537#define B2056_MIXA_PU_Q 0x02
538#define B2056_MIXA_PU_GM 0x10
539#define B2056_MIXG_PU_I 0x01
540#define B2056_MIXG_PU_Q 0x02
541#define B2056_MIXG_PU_GM 0x10
542#define B2056_TIA_PU 0x01
543#define B2056_BB_LPF_PU 0x20
544#define B2056_W1_PU 0x02
545#define B2056_W2_PU 0x04
546#define B2056_NB_PU 0x08
547#define B2056_RSSI_W1_SEL 0x02
548#define B2056_RSSI_W2_SEL 0x04
549#define B2056_RSSI_NB_SEL 0x08
550#define B2056_VCM_MASK 0x1C
551#define B2056_RSSI_VCM_SHIFT 0x02
552
553#define B2056_SYN (0x0 << 12)
554#define B2056_TX0 (0x2 << 12)
555#define B2056_TX1 (0x3 << 12)
556#define B2056_RX0 (0x6 << 12)
557#define B2056_RX1 (0x7 << 12)
558#define B2056_ALLTX (0xE << 12)
559#define B2056_ALLRX (0xF << 12)
560
561#define B2056_SYN_RESERVED_ADDR0 0x00
562#define B2056_SYN_IDCODE 0x01
563#define B2056_SYN_RESERVED_ADDR2 0x02
564#define B2056_SYN_RESERVED_ADDR3 0x03
565#define B2056_SYN_RESERVED_ADDR4 0x04
566#define B2056_SYN_RESERVED_ADDR5 0x05
567#define B2056_SYN_RESERVED_ADDR6 0x06
568#define B2056_SYN_RESERVED_ADDR7 0x07
569#define B2056_SYN_COM_CTRL 0x08
570#define B2056_SYN_COM_PU 0x09
571#define B2056_SYN_COM_OVR 0x0A
572#define B2056_SYN_COM_RESET 0x0B
573#define B2056_SYN_COM_RCAL 0x0C
574#define B2056_SYN_COM_RC_RXLPF 0x0D
575#define B2056_SYN_COM_RC_TXLPF 0x0E
576#define B2056_SYN_COM_RC_RXHPF 0x0F
577#define B2056_SYN_RESERVED_ADDR16 0x10
578#define B2056_SYN_RESERVED_ADDR17 0x11
579#define B2056_SYN_RESERVED_ADDR18 0x12
580#define B2056_SYN_RESERVED_ADDR19 0x13
581#define B2056_SYN_RESERVED_ADDR20 0x14
582#define B2056_SYN_RESERVED_ADDR21 0x15
583#define B2056_SYN_RESERVED_ADDR22 0x16
584#define B2056_SYN_RESERVED_ADDR23 0x17
585#define B2056_SYN_RESERVED_ADDR24 0x18
586#define B2056_SYN_RESERVED_ADDR25 0x19
587#define B2056_SYN_RESERVED_ADDR26 0x1A
588#define B2056_SYN_RESERVED_ADDR27 0x1B
589#define B2056_SYN_RESERVED_ADDR28 0x1C
590#define B2056_SYN_RESERVED_ADDR29 0x1D
591#define B2056_SYN_RESERVED_ADDR30 0x1E
592#define B2056_SYN_RESERVED_ADDR31 0x1F
593#define B2056_SYN_GPIO_MASTER1 0x20
594#define B2056_SYN_GPIO_MASTER2 0x21
595#define B2056_SYN_TOPBIAS_MASTER 0x22
596#define B2056_SYN_TOPBIAS_RCAL 0x23
597#define B2056_SYN_AFEREG 0x24
598#define B2056_SYN_TEMPPROCSENSE 0x25
599#define B2056_SYN_TEMPPROCSENSEIDAC 0x26
600#define B2056_SYN_TEMPPROCSENSERCAL 0x27
601#define B2056_SYN_LPO 0x28
602#define B2056_SYN_VDDCAL_MASTER 0x29
603#define B2056_SYN_VDDCAL_IDAC 0x2A
604#define B2056_SYN_VDDCAL_STATUS 0x2B
605#define B2056_SYN_RCAL_MASTER 0x2C
606#define B2056_SYN_RCAL_CODE_OUT 0x2D
607#define B2056_SYN_RCCAL_CTRL0 0x2E
608#define B2056_SYN_RCCAL_CTRL1 0x2F
609#define B2056_SYN_RCCAL_CTRL2 0x30
610#define B2056_SYN_RCCAL_CTRL3 0x31
611#define B2056_SYN_RCCAL_CTRL4 0x32
612#define B2056_SYN_RCCAL_CTRL5 0x33
613#define B2056_SYN_RCCAL_CTRL6 0x34
614#define B2056_SYN_RCCAL_CTRL7 0x35
615#define B2056_SYN_RCCAL_CTRL8 0x36
616#define B2056_SYN_RCCAL_CTRL9 0x37
617#define B2056_SYN_RCCAL_CTRL10 0x38
618#define B2056_SYN_RCCAL_CTRL11 0x39
619#define B2056_SYN_ZCAL_SPARE1 0x3A
620#define B2056_SYN_ZCAL_SPARE2 0x3B
621#define B2056_SYN_PLL_MAST1 0x3C
622#define B2056_SYN_PLL_MAST2 0x3D
623#define B2056_SYN_PLL_MAST3 0x3E
624#define B2056_SYN_PLL_BIAS_RESET 0x3F
625#define B2056_SYN_PLL_XTAL0 0x40
626#define B2056_SYN_PLL_XTAL1 0x41
627#define B2056_SYN_PLL_XTAL3 0x42
628#define B2056_SYN_PLL_XTAL4 0x43
629#define B2056_SYN_PLL_XTAL5 0x44
630#define B2056_SYN_PLL_XTAL6 0x45
631#define B2056_SYN_PLL_REFDIV 0x46
632#define B2056_SYN_PLL_PFD 0x47
633#define B2056_SYN_PLL_CP1 0x48
634#define B2056_SYN_PLL_CP2 0x49
635#define B2056_SYN_PLL_CP3 0x4A
636#define B2056_SYN_PLL_LOOPFILTER1 0x4B
637#define B2056_SYN_PLL_LOOPFILTER2 0x4C
638#define B2056_SYN_PLL_LOOPFILTER3 0x4D
639#define B2056_SYN_PLL_LOOPFILTER4 0x4E
640#define B2056_SYN_PLL_LOOPFILTER5 0x4F
641#define B2056_SYN_PLL_MMD1 0x50
642#define B2056_SYN_PLL_MMD2 0x51
643#define B2056_SYN_PLL_VCO1 0x52
644#define B2056_SYN_PLL_VCO2 0x53
645#define B2056_SYN_PLL_MONITOR1 0x54
646#define B2056_SYN_PLL_MONITOR2 0x55
647#define B2056_SYN_PLL_VCOCAL1 0x56
648#define B2056_SYN_PLL_VCOCAL2 0x57
649#define B2056_SYN_PLL_VCOCAL4 0x58
650#define B2056_SYN_PLL_VCOCAL5 0x59
651#define B2056_SYN_PLL_VCOCAL6 0x5A
652#define B2056_SYN_PLL_VCOCAL7 0x5B
653#define B2056_SYN_PLL_VCOCAL8 0x5C
654#define B2056_SYN_PLL_VCOCAL9 0x5D
655#define B2056_SYN_PLL_VCOCAL10 0x5E
656#define B2056_SYN_PLL_VCOCAL11 0x5F
657#define B2056_SYN_PLL_VCOCAL12 0x60
658#define B2056_SYN_PLL_VCOCAL13 0x61
659#define B2056_SYN_PLL_VREG 0x62
660#define B2056_SYN_PLL_STATUS1 0x63
661#define B2056_SYN_PLL_STATUS2 0x64
662#define B2056_SYN_PLL_STATUS3 0x65
663#define B2056_SYN_LOGEN_PU0 0x66
664#define B2056_SYN_LOGEN_PU1 0x67
665#define B2056_SYN_LOGEN_PU2 0x68
666#define B2056_SYN_LOGEN_PU3 0x69
667#define B2056_SYN_LOGEN_PU5 0x6A
668#define B2056_SYN_LOGEN_PU6 0x6B
669#define B2056_SYN_LOGEN_PU7 0x6C
670#define B2056_SYN_LOGEN_PU8 0x6D
671#define B2056_SYN_LOGEN_BIAS_RESET 0x6E
672#define B2056_SYN_LOGEN_RCCR1 0x6F
673#define B2056_SYN_LOGEN_VCOBUF1 0x70
674#define B2056_SYN_LOGEN_MIXER1 0x71
675#define B2056_SYN_LOGEN_MIXER2 0x72
676#define B2056_SYN_LOGEN_BUF1 0x73
677#define B2056_SYN_LOGENBUF2 0x74
678#define B2056_SYN_LOGEN_BUF3 0x75
679#define B2056_SYN_LOGEN_BUF4 0x76
680#define B2056_SYN_LOGEN_DIV1 0x77
681#define B2056_SYN_LOGEN_DIV2 0x78
682#define B2056_SYN_LOGEN_DIV3 0x79
683#define B2056_SYN_LOGEN_ACL1 0x7A
684#define B2056_SYN_LOGEN_ACL2 0x7B
685#define B2056_SYN_LOGEN_ACL3 0x7C
686#define B2056_SYN_LOGEN_ACL4 0x7D
687#define B2056_SYN_LOGEN_ACL5 0x7E
688#define B2056_SYN_LOGEN_ACL6 0x7F
689#define B2056_SYN_LOGEN_ACLOUT 0x80
690#define B2056_SYN_LOGEN_ACLCAL1 0x81
691#define B2056_SYN_LOGEN_ACLCAL2 0x82
692#define B2056_SYN_LOGEN_ACLCAL3 0x83
693#define B2056_SYN_CALEN 0x84
694#define B2056_SYN_LOGEN_PEAKDET1 0x85
695#define B2056_SYN_LOGEN_CORE_ACL_OVR 0x86
696#define B2056_SYN_LOGEN_RX_DIFF_ACL_OVR 0x87
697#define B2056_SYN_LOGEN_TX_DIFF_ACL_OVR 0x88
698#define B2056_SYN_LOGEN_RX_CMOS_ACL_OVR 0x89
699#define B2056_SYN_LOGEN_TX_CMOS_ACL_OVR 0x8A
700#define B2056_SYN_LOGEN_VCOBUF2 0x8B
701#define B2056_SYN_LOGEN_MIXER3 0x8C
702#define B2056_SYN_LOGEN_BUF5 0x8D
703#define B2056_SYN_LOGEN_BUF6 0x8E
704#define B2056_SYN_LOGEN_CBUFRX1 0x8F
705#define B2056_SYN_LOGEN_CBUFRX2 0x90
706#define B2056_SYN_LOGEN_CBUFRX3 0x91
707#define B2056_SYN_LOGEN_CBUFRX4 0x92
708#define B2056_SYN_LOGEN_CBUFTX1 0x93
709#define B2056_SYN_LOGEN_CBUFTX2 0x94
710#define B2056_SYN_LOGEN_CBUFTX3 0x95
711#define B2056_SYN_LOGEN_CBUFTX4 0x96
712#define B2056_SYN_LOGEN_CMOSRX1 0x97
713#define B2056_SYN_LOGEN_CMOSRX2 0x98
714#define B2056_SYN_LOGEN_CMOSRX3 0x99
715#define B2056_SYN_LOGEN_CMOSRX4 0x9A
716#define B2056_SYN_LOGEN_CMOSTX1 0x9B
717#define B2056_SYN_LOGEN_CMOSTX2 0x9C
718#define B2056_SYN_LOGEN_CMOSTX3 0x9D
719#define B2056_SYN_LOGEN_CMOSTX4 0x9E
720#define B2056_SYN_LOGEN_VCOBUF2_OVRVAL 0x9F
721#define B2056_SYN_LOGEN_MIXER3_OVRVAL 0xA0
722#define B2056_SYN_LOGEN_BUF5_OVRVAL 0xA1
723#define B2056_SYN_LOGEN_BUF6_OVRVAL 0xA2
724#define B2056_SYN_LOGEN_CBUFRX1_OVRVAL 0xA3
725#define B2056_SYN_LOGEN_CBUFRX2_OVRVAL 0xA4
726#define B2056_SYN_LOGEN_CBUFRX3_OVRVAL 0xA5
727#define B2056_SYN_LOGEN_CBUFRX4_OVRVAL 0xA6
728#define B2056_SYN_LOGEN_CBUFTX1_OVRVAL 0xA7
729#define B2056_SYN_LOGEN_CBUFTX2_OVRVAL 0xA8
730#define B2056_SYN_LOGEN_CBUFTX3_OVRVAL 0xA9
731#define B2056_SYN_LOGEN_CBUFTX4_OVRVAL 0xAA
732#define B2056_SYN_LOGEN_CMOSRX1_OVRVAL 0xAB
733#define B2056_SYN_LOGEN_CMOSRX2_OVRVAL 0xAC
734#define B2056_SYN_LOGEN_CMOSRX3_OVRVAL 0xAD
735#define B2056_SYN_LOGEN_CMOSRX4_OVRVAL 0xAE
736#define B2056_SYN_LOGEN_CMOSTX1_OVRVAL 0xAF
737#define B2056_SYN_LOGEN_CMOSTX2_OVRVAL 0xB0
738#define B2056_SYN_LOGEN_CMOSTX3_OVRVAL 0xB1
739#define B2056_SYN_LOGEN_CMOSTX4_OVRVAL 0xB2
740#define B2056_SYN_LOGEN_ACL_WAITCNT 0xB3
741#define B2056_SYN_LOGEN_CORE_CALVALID 0xB4
742#define B2056_SYN_LOGEN_RX_CMOS_CALVALID 0xB5
743#define B2056_SYN_LOGEN_TX_CMOS_VALID 0xB6
744
745#define B2056_TX_RESERVED_ADDR0 0x00
746#define B2056_TX_IDCODE 0x01
747#define B2056_TX_RESERVED_ADDR2 0x02
748#define B2056_TX_RESERVED_ADDR3 0x03
749#define B2056_TX_RESERVED_ADDR4 0x04
750#define B2056_TX_RESERVED_ADDR5 0x05
751#define B2056_TX_RESERVED_ADDR6 0x06
752#define B2056_TX_RESERVED_ADDR7 0x07
753#define B2056_TX_COM_CTRL 0x08
754#define B2056_TX_COM_PU 0x09
755#define B2056_TX_COM_OVR 0x0A
756#define B2056_TX_COM_RESET 0x0B
757#define B2056_TX_COM_RCAL 0x0C
758#define B2056_TX_COM_RC_RXLPF 0x0D
759#define B2056_TX_COM_RC_TXLPF 0x0E
760#define B2056_TX_COM_RC_RXHPF 0x0F
761#define B2056_TX_RESERVED_ADDR16 0x10
762#define B2056_TX_RESERVED_ADDR17 0x11
763#define B2056_TX_RESERVED_ADDR18 0x12
764#define B2056_TX_RESERVED_ADDR19 0x13
765#define B2056_TX_RESERVED_ADDR20 0x14
766#define B2056_TX_RESERVED_ADDR21 0x15
767#define B2056_TX_RESERVED_ADDR22 0x16
768#define B2056_TX_RESERVED_ADDR23 0x17
769#define B2056_TX_RESERVED_ADDR24 0x18
770#define B2056_TX_RESERVED_ADDR25 0x19
771#define B2056_TX_RESERVED_ADDR26 0x1A
772#define B2056_TX_RESERVED_ADDR27 0x1B
773#define B2056_TX_RESERVED_ADDR28 0x1C
774#define B2056_TX_RESERVED_ADDR29 0x1D
775#define B2056_TX_RESERVED_ADDR30 0x1E
776#define B2056_TX_RESERVED_ADDR31 0x1F
777#define B2056_TX_IQCAL_GAIN_BW 0x20
778#define B2056_TX_LOFT_FINE_I 0x21
779#define B2056_TX_LOFT_FINE_Q 0x22
780#define B2056_TX_LOFT_COARSE_I 0x23
781#define B2056_TX_LOFT_COARSE_Q 0x24
782#define B2056_TX_TX_COM_MASTER1 0x25
783#define B2056_TX_TX_COM_MASTER2 0x26
784#define B2056_TX_RXIQCAL_TXMUX 0x27
785#define B2056_TX_TX_SSI_MASTER 0x28
786#define B2056_TX_IQCAL_VCM_HG 0x29
787#define B2056_TX_IQCAL_IDAC 0x2A
788#define B2056_TX_TSSI_VCM 0x2B
789#define B2056_TX_TX_AMP_DET 0x2C
790#define B2056_TX_TX_SSI_MUX 0x2D
791#define B2056_TX_TSSIA 0x2E
792#define B2056_TX_TSSIG 0x2F
793#define B2056_TX_TSSI_MISC1 0x30
794#define B2056_TX_TSSI_MISC2 0x31
795#define B2056_TX_TSSI_MISC3 0x32
796#define B2056_TX_PA_SPARE1 0x33
797#define B2056_TX_PA_SPARE2 0x34
798#define B2056_TX_INTPAA_MASTER 0x35
799#define B2056_TX_INTPAA_GAIN 0x36
800#define B2056_TX_INTPAA_BOOST_TUNE 0x37
801#define B2056_TX_INTPAA_IAUX_STAT 0x38
802#define B2056_TX_INTPAA_IAUX_DYN 0x39
803#define B2056_TX_INTPAA_IMAIN_STAT 0x3A
804#define B2056_TX_INTPAA_IMAIN_DYN 0x3B
805#define B2056_TX_INTPAA_CASCBIAS 0x3C
806#define B2056_TX_INTPAA_PASLOPE 0x3D
807#define B2056_TX_INTPAA_PA_MISC 0x3E
808#define B2056_TX_INTPAG_MASTER 0x3F
809#define B2056_TX_INTPAG_GAIN 0x40
810#define B2056_TX_INTPAG_BOOST_TUNE 0x41
811#define B2056_TX_INTPAG_IAUX_STAT 0x42
812#define B2056_TX_INTPAG_IAUX_DYN 0x43
813#define B2056_TX_INTPAG_IMAIN_STAT 0x44
814#define B2056_TX_INTPAG_IMAIN_DYN 0x45
815#define B2056_TX_INTPAG_CASCBIAS 0x46
816#define B2056_TX_INTPAG_PASLOPE 0x47
817#define B2056_TX_INTPAG_PA_MISC 0x48
818#define B2056_TX_PADA_MASTER 0x49
819#define B2056_TX_PADA_IDAC 0x4A
820#define B2056_TX_PADA_CASCBIAS 0x4B
821#define B2056_TX_PADA_GAIN 0x4C
822#define B2056_TX_PADA_BOOST_TUNE 0x4D
823#define B2056_TX_PADA_SLOPE 0x4E
824#define B2056_TX_PADG_MASTER 0x4F
825#define B2056_TX_PADG_IDAC 0x50
826#define B2056_TX_PADG_CASCBIAS 0x51
827#define B2056_TX_PADG_GAIN 0x52
828#define B2056_TX_PADG_BOOST_TUNE 0x53
829#define B2056_TX_PADG_SLOPE 0x54
830#define B2056_TX_PGAA_MASTER 0x55
831#define B2056_TX_PGAA_IDAC 0x56
832#define B2056_TX_PGAA_GAIN 0x57
833#define B2056_TX_PGAA_BOOST_TUNE 0x58
834#define B2056_TX_PGAA_SLOPE 0x59
835#define B2056_TX_PGAA_MISC 0x5A
836#define B2056_TX_PGAG_MASTER 0x5B
837#define B2056_TX_PGAG_IDAC 0x5C
838#define B2056_TX_PGAG_GAIN 0x5D
839#define B2056_TX_PGAG_BOOST_TUNE 0x5E
840#define B2056_TX_PGAG_SLOPE 0x5F
841#define B2056_TX_PGAG_MISC 0x60
842#define B2056_TX_MIXA_MASTER 0x61
843#define B2056_TX_MIXA_BOOST_TUNE 0x62
844#define B2056_TX_MIXG 0x63
845#define B2056_TX_MIXG_BOOST_TUNE 0x64
846#define B2056_TX_BB_GM_MASTER 0x65
847#define B2056_TX_GMBB_GM 0x66
848#define B2056_TX_GMBB_IDAC 0x67
849#define B2056_TX_TXLPF_MASTER 0x68
850#define B2056_TX_TXLPF_RCCAL 0x69
851#define B2056_TX_TXLPF_RCCAL_OFF0 0x6A
852#define B2056_TX_TXLPF_RCCAL_OFF1 0x6B
853#define B2056_TX_TXLPF_RCCAL_OFF2 0x6C
854#define B2056_TX_TXLPF_RCCAL_OFF3 0x6D
855#define B2056_TX_TXLPF_RCCAL_OFF4 0x6E
856#define B2056_TX_TXLPF_RCCAL_OFF5 0x6F
857#define B2056_TX_TXLPF_RCCAL_OFF6 0x70
858#define B2056_TX_TXLPF_BW 0x71
859#define B2056_TX_TXLPF_GAIN 0x72
860#define B2056_TX_TXLPF_IDAC 0x73
861#define B2056_TX_TXLPF_IDAC_0 0x74
862#define B2056_TX_TXLPF_IDAC_1 0x75
863#define B2056_TX_TXLPF_IDAC_2 0x76
864#define B2056_TX_TXLPF_IDAC_3 0x77
865#define B2056_TX_TXLPF_IDAC_4 0x78
866#define B2056_TX_TXLPF_IDAC_5 0x79
867#define B2056_TX_TXLPF_IDAC_6 0x7A
868#define B2056_TX_TXLPF_OPAMP_IDAC 0x7B
869#define B2056_TX_TXLPF_MISC 0x7C
870#define B2056_TX_TXSPARE1 0x7D
871#define B2056_TX_TXSPARE2 0x7E
872#define B2056_TX_TXSPARE3 0x7F
873#define B2056_TX_TXSPARE4 0x80
874#define B2056_TX_TXSPARE5 0x81
875#define B2056_TX_TXSPARE6 0x82
876#define B2056_TX_TXSPARE7 0x83
877#define B2056_TX_TXSPARE8 0x84
878#define B2056_TX_TXSPARE9 0x85
879#define B2056_TX_TXSPARE10 0x86
880#define B2056_TX_TXSPARE11 0x87
881#define B2056_TX_TXSPARE12 0x88
882#define B2056_TX_TXSPARE13 0x89
883#define B2056_TX_TXSPARE14 0x8A
884#define B2056_TX_TXSPARE15 0x8B
885#define B2056_TX_TXSPARE16 0x8C
886#define B2056_TX_STATUS_INTPA_GAIN 0x8D
887#define B2056_TX_STATUS_PAD_GAIN 0x8E
888#define B2056_TX_STATUS_PGA_GAIN 0x8F
889#define B2056_TX_STATUS_GM_TXLPF_GAIN 0x90
890#define B2056_TX_STATUS_TXLPF_BW 0x91
891#define B2056_TX_STATUS_TXLPF_RC 0x92
892#define B2056_TX_GMBB_IDAC0 0x93
893#define B2056_TX_GMBB_IDAC1 0x94
894#define B2056_TX_GMBB_IDAC2 0x95
895#define B2056_TX_GMBB_IDAC3 0x96
896#define B2056_TX_GMBB_IDAC4 0x97
897#define B2056_TX_GMBB_IDAC5 0x98
898#define B2056_TX_GMBB_IDAC6 0x99
899#define B2056_TX_GMBB_IDAC7 0x9A
900
901#define B2056_RX_RESERVED_ADDR0 0x00
902#define B2056_RX_IDCODE 0x01
903#define B2056_RX_RESERVED_ADDR2 0x02
904#define B2056_RX_RESERVED_ADDR3 0x03
905#define B2056_RX_RESERVED_ADDR4 0x04
906#define B2056_RX_RESERVED_ADDR5 0x05
907#define B2056_RX_RESERVED_ADDR6 0x06
908#define B2056_RX_RESERVED_ADDR7 0x07
909#define B2056_RX_COM_CTRL 0x08
910#define B2056_RX_COM_PU 0x09
911#define B2056_RX_COM_OVR 0x0A
912#define B2056_RX_COM_RESET 0x0B
913#define B2056_RX_COM_RCAL 0x0C
914#define B2056_RX_COM_RC_RXLPF 0x0D
915#define B2056_RX_COM_RC_TXLPF 0x0E
916#define B2056_RX_COM_RC_RXHPF 0x0F
917#define B2056_RX_RESERVED_ADDR16 0x10
918#define B2056_RX_RESERVED_ADDR17 0x11
919#define B2056_RX_RESERVED_ADDR18 0x12
920#define B2056_RX_RESERVED_ADDR19 0x13
921#define B2056_RX_RESERVED_ADDR20 0x14
922#define B2056_RX_RESERVED_ADDR21 0x15
923#define B2056_RX_RESERVED_ADDR22 0x16
924#define B2056_RX_RESERVED_ADDR23 0x17
925#define B2056_RX_RESERVED_ADDR24 0x18
926#define B2056_RX_RESERVED_ADDR25 0x19
927#define B2056_RX_RESERVED_ADDR26 0x1A
928#define B2056_RX_RESERVED_ADDR27 0x1B
929#define B2056_RX_RESERVED_ADDR28 0x1C
930#define B2056_RX_RESERVED_ADDR29 0x1D
931#define B2056_RX_RESERVED_ADDR30 0x1E
932#define B2056_RX_RESERVED_ADDR31 0x1F
933#define B2056_RX_RXIQCAL_RXMUX 0x20
934#define B2056_RX_RSSI_PU 0x21
935#define B2056_RX_RSSI_SEL 0x22
936#define B2056_RX_RSSI_GAIN 0x23
937#define B2056_RX_RSSI_NB_IDAC 0x24
938#define B2056_RX_RSSI_WB2I_IDAC_1 0x25
939#define B2056_RX_RSSI_WB2I_IDAC_2 0x26
940#define B2056_RX_RSSI_WB2Q_IDAC_1 0x27
941#define B2056_RX_RSSI_WB2Q_IDAC_2 0x28
942#define B2056_RX_RSSI_POLE 0x29
943#define B2056_RX_RSSI_WB1_IDAC 0x2A
944#define B2056_RX_RSSI_MISC 0x2B
945#define B2056_RX_LNAA_MASTER 0x2C
946#define B2056_RX_LNAA_TUNE 0x2D
947#define B2056_RX_LNAA_GAIN 0x2E
948#define B2056_RX_LNA_A_SLOPE 0x2F
949#define B2056_RX_BIASPOLE_LNAA1_IDAC 0x30
950#define B2056_RX_LNAA2_IDAC 0x31
951#define B2056_RX_LNA1A_MISC 0x32
952#define B2056_RX_LNAG_MASTER 0x33
953#define B2056_RX_LNAG_TUNE 0x34
954#define B2056_RX_LNAG_GAIN 0x35
955#define B2056_RX_LNA_G_SLOPE 0x36
956#define B2056_RX_BIASPOLE_LNAG1_IDAC 0x37
957#define B2056_RX_LNAG2_IDAC 0x38
958#define B2056_RX_LNA1G_MISC 0x39
959#define B2056_RX_MIXA_MASTER 0x3A
960#define B2056_RX_MIXA_VCM 0x3B
961#define B2056_RX_MIXA_CTRLPTAT 0x3C
962#define B2056_RX_MIXA_LOB_BIAS 0x3D
963#define B2056_RX_MIXA_CORE_IDAC 0x3E
964#define B2056_RX_MIXA_CMFB_IDAC 0x3F
965#define B2056_RX_MIXA_BIAS_AUX 0x40
966#define B2056_RX_MIXA_BIAS_MAIN 0x41
967#define B2056_RX_MIXA_BIAS_MISC 0x42
968#define B2056_RX_MIXA_MAST_BIAS 0x43
969#define B2056_RX_MIXG_MASTER 0x44
970#define B2056_RX_MIXG_VCM 0x45
971#define B2056_RX_MIXG_CTRLPTAT 0x46
972#define B2056_RX_MIXG_LOB_BIAS 0x47
973#define B2056_RX_MIXG_CORE_IDAC 0x48
974#define B2056_RX_MIXG_CMFB_IDAC 0x49
975#define B2056_RX_MIXG_BIAS_AUX 0x4A
976#define B2056_RX_MIXG_BIAS_MAIN 0x4B
977#define B2056_RX_MIXG_BIAS_MISC 0x4C
978#define B2056_RX_MIXG_MAST_BIAS 0x4D
979#define B2056_RX_TIA_MASTER 0x4E
980#define B2056_RX_TIA_IOPAMP 0x4F
981#define B2056_RX_TIA_QOPAMP 0x50
982#define B2056_RX_TIA_IMISC 0x51
983#define B2056_RX_TIA_QMISC 0x52
984#define B2056_RX_TIA_GAIN 0x53
985#define B2056_RX_TIA_SPARE1 0x54
986#define B2056_RX_TIA_SPARE2 0x55
987#define B2056_RX_BB_LPF_MASTER 0x56
988#define B2056_RX_AACI_MASTER 0x57
989#define B2056_RX_RXLPF_IDAC 0x58
990#define B2056_RX_RXLPF_OPAMPBIAS_LOWQ 0x59
991#define B2056_RX_RXLPF_OPAMPBIAS_HIGHQ 0x5A
992#define B2056_RX_RXLPF_BIAS_DCCANCEL 0x5B
993#define B2056_RX_RXLPF_OUTVCM 0x5C
994#define B2056_RX_RXLPF_INVCM_BODY 0x5D
995#define B2056_RX_RXLPF_CC_OP 0x5E
996#define B2056_RX_RXLPF_GAIN 0x5F
997#define B2056_RX_RXLPF_Q_BW 0x60
998#define B2056_RX_RXLPF_HP_CORNER_BW 0x61
999#define B2056_RX_RXLPF_RCCAL_HPC 0x62
1000#define B2056_RX_RXHPF_OFF0 0x63
1001#define B2056_RX_RXHPF_OFF1 0x64
1002#define B2056_RX_RXHPF_OFF2 0x65
1003#define B2056_RX_RXHPF_OFF3 0x66
1004#define B2056_RX_RXHPF_OFF4 0x67
1005#define B2056_RX_RXHPF_OFF5 0x68
1006#define B2056_RX_RXHPF_OFF6 0x69
1007#define B2056_RX_RXHPF_OFF7 0x6A
1008#define B2056_RX_RXLPF_RCCAL_LPC 0x6B
1009#define B2056_RX_RXLPF_OFF_0 0x6C
1010#define B2056_RX_RXLPF_OFF_1 0x6D
1011#define B2056_RX_RXLPF_OFF_2 0x6E
1012#define B2056_RX_RXLPF_OFF_3 0x6F
1013#define B2056_RX_RXLPF_OFF_4 0x70
1014#define B2056_RX_UNUSED 0x71
1015#define B2056_RX_VGA_MASTER 0x72
1016#define B2056_RX_VGA_BIAS 0x73
1017#define B2056_RX_VGA_BIAS_DCCANCEL 0x74
1018#define B2056_RX_VGA_GAIN 0x75
1019#define B2056_RX_VGA_HP_CORNER_BW 0x76
1020#define B2056_RX_VGABUF_BIAS 0x77
1021#define B2056_RX_VGABUF_GAIN_BW 0x78
1022#define B2056_RX_TXFBMIX_A 0x79
1023#define B2056_RX_TXFBMIX_G 0x7A
1024#define B2056_RX_RXSPARE1 0x7B
1025#define B2056_RX_RXSPARE2 0x7C
1026#define B2056_RX_RXSPARE3 0x7D
1027#define B2056_RX_RXSPARE4 0x7E
1028#define B2056_RX_RXSPARE5 0x7F
1029#define B2056_RX_RXSPARE6 0x80
1030#define B2056_RX_RXSPARE7 0x81
1031#define B2056_RX_RXSPARE8 0x82
1032#define B2056_RX_RXSPARE9 0x83
1033#define B2056_RX_RXSPARE10 0x84
1034#define B2056_RX_RXSPARE11 0x85
1035#define B2056_RX_RXSPARE12 0x86
1036#define B2056_RX_RXSPARE13 0x87
1037#define B2056_RX_RXSPARE14 0x88
1038#define B2056_RX_RXSPARE15 0x89
1039#define B2056_RX_RXSPARE16 0x8A
1040#define B2056_RX_STATUS_LNAA_GAIN 0x8B
1041#define B2056_RX_STATUS_LNAG_GAIN 0x8C
1042#define B2056_RX_STATUS_MIXTIA_GAIN 0x8D
1043#define B2056_RX_STATUS_RXLPF_GAIN 0x8E
1044#define B2056_RX_STATUS_VGA_BUF_GAIN 0x8F
1045#define B2056_RX_STATUS_RXLPF_Q 0x90
1046#define B2056_RX_STATUS_RXLPF_BUF_BW 0x91
1047#define B2056_RX_STATUS_RXLPF_VGA_HPC 0x92
1048#define B2056_RX_STATUS_RXLPF_RC 0x93
1049#define B2056_RX_STATUS_HPC_RC 0x94
1050
1051#define B2056_LNA1_A_PU 0x01
1052#define B2056_LNA2_A_PU 0x02
1053#define B2056_LNA1_G_PU 0x01
1054#define B2056_LNA2_G_PU 0x02
1055#define B2056_MIXA_PU_I 0x01
1056#define B2056_MIXA_PU_Q 0x02
1057#define B2056_MIXA_PU_GM 0x10
1058#define B2056_MIXG_PU_I 0x01
1059#define B2056_MIXG_PU_Q 0x02
1060#define B2056_MIXG_PU_GM 0x10
1061#define B2056_TIA_PU 0x01
1062#define B2056_BB_LPF_PU 0x20
1063#define B2056_W1_PU 0x02
1064#define B2056_W2_PU 0x04
1065#define B2056_NB_PU 0x08
1066#define B2056_RSSI_W1_SEL 0x02
1067#define B2056_RSSI_W2_SEL 0x04
1068#define B2056_RSSI_NB_SEL 0x08
1069#define B2056_VCM_MASK 0x1C
1070#define B2056_RSSI_VCM_SHIFT 0x02
1071
31struct b43_nphy_channeltab_entry_rev3 { 1072struct b43_nphy_channeltab_entry_rev3 {
32 /* The channel number */
33 u8 channel;
34 /* The channel frequency in MHz */ 1073 /* The channel frequency in MHz */
35 u16 freq; 1074 u16 freq;
36 /* Radio register values on channelswitch */ 1075 /* Radio register values on channelswitch */
37 /* TODO */ 1076 u8 radio_syn_pll_vcocal1;
1077 u8 radio_syn_pll_vcocal2;
1078 u8 radio_syn_pll_refdiv;
1079 u8 radio_syn_pll_mmd2;
1080 u8 radio_syn_pll_mmd1;
1081 u8 radio_syn_pll_loopfilter1;
1082 u8 radio_syn_pll_loopfilter2;
1083 u8 radio_syn_pll_loopfilter3;
1084 u8 radio_syn_pll_loopfilter4;
1085 u8 radio_syn_pll_loopfilter5;
1086 u8 radio_syn_reserved_addr27;
1087 u8 radio_syn_reserved_addr28;
1088 u8 radio_syn_reserved_addr29;
1089 u8 radio_syn_logen_vcobuf1;
1090 u8 radio_syn_logen_mixer2;
1091 u8 radio_syn_logen_buf3;
1092 u8 radio_syn_logen_buf4;
1093 u8 radio_rx0_lnaa_tune;
1094 u8 radio_rx0_lnag_tune;
1095 u8 radio_tx0_intpaa_boost_tune;
1096 u8 radio_tx0_intpag_boost_tune;
1097 u8 radio_tx0_pada_boost_tune;
1098 u8 radio_tx0_padg_boost_tune;
1099 u8 radio_tx0_pgaa_boost_tune;
1100 u8 radio_tx0_pgag_boost_tune;
1101 u8 radio_tx0_mixa_boost_tune;
1102 u8 radio_tx0_mixg_boost_tune;
1103 u8 radio_rx1_lnaa_tune;
1104 u8 radio_rx1_lnag_tune;
1105 u8 radio_tx1_intpaa_boost_tune;
1106 u8 radio_tx1_intpag_boost_tune;
1107 u8 radio_tx1_pada_boost_tune;
1108 u8 radio_tx1_padg_boost_tune;
1109 u8 radio_tx1_pgaa_boost_tune;
1110 u8 radio_tx1_pgag_boost_tune;
1111 u8 radio_tx1_mixa_boost_tune;
1112 u8 radio_tx1_mixg_boost_tune;
38 /* PHY register values on channelswitch */ 1113 /* PHY register values on channelswitch */
39 struct b43_phy_n_sfo_cfg phy_regs; 1114 struct b43_phy_n_sfo_cfg phy_regs;
40}; 1115};
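The 37 values passed to RADIOREGS3() and the 6 values passed to PHYREGS() in the table entries earlier in this patch are positional; the initializer macros themselves live in the .c file and are not part of this hunk. A hedged sketch of what they presumably expand to, with the field order simply taken from the struct layout above (the exact kernel macros may differ, and the positional brace-init of struct b43_phy_n_sfo_cfg is an assumption):

	/* Hypothetical initializer helpers; field order follows the struct above. */
	#define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09,	\
			   r10, r11, r12, r13, r14, r15, r16, r17, r18, r19,	\
			   r20, r21, r22, r23, r24, r25, r26, r27, r28, r29,	\
			   r30, r31, r32, r33, r34, r35, r36)			\
		.radio_syn_pll_vcocal1		= r00,				\
		.radio_syn_pll_vcocal2		= r01,				\
		.radio_syn_pll_refdiv		= r02,				\
		.radio_syn_pll_mmd2		= r03,				\
		.radio_syn_pll_mmd1		= r04,				\
		.radio_syn_pll_loopfilter1	= r05,				\
		.radio_syn_pll_loopfilter2	= r06,				\
		.radio_syn_pll_loopfilter3	= r07,				\
		.radio_syn_pll_loopfilter4	= r08,				\
		.radio_syn_pll_loopfilter5	= r09,				\
		.radio_syn_reserved_addr27	= r10,				\
		.radio_syn_reserved_addr28	= r11,				\
		.radio_syn_reserved_addr29	= r12,				\
		.radio_syn_logen_vcobuf1	= r13,				\
		.radio_syn_logen_mixer2		= r14,				\
		.radio_syn_logen_buf3		= r15,				\
		.radio_syn_logen_buf4		= r16,				\
		.radio_rx0_lnaa_tune		= r17,				\
		.radio_rx0_lnag_tune		= r18,				\
		.radio_tx0_intpaa_boost_tune	= r19,				\
		.radio_tx0_intpag_boost_tune	= r20,				\
		.radio_tx0_pada_boost_tune	= r21,				\
		.radio_tx0_padg_boost_tune	= r22,				\
		.radio_tx0_pgaa_boost_tune	= r23,				\
		.radio_tx0_pgag_boost_tune	= r24,				\
		.radio_tx0_mixa_boost_tune	= r25,				\
		.radio_tx0_mixg_boost_tune	= r26,				\
		.radio_rx1_lnaa_tune		= r27,				\
		.radio_rx1_lnag_tune		= r28,				\
		.radio_tx1_intpaa_boost_tune	= r29,				\
		.radio_tx1_intpag_boost_tune	= r30,				\
		.radio_tx1_pada_boost_tune	= r31,				\
		.radio_tx1_padg_boost_tune	= r32,				\
		.radio_tx1_pgaa_boost_tune	= r33,				\
		.radio_tx1_pgag_boost_tune	= r34,				\
		.radio_tx1_mixa_boost_tune	= r35,				\
		.radio_tx1_mixg_boost_tune	= r36

	/* Six PHY register values map onto the phy_regs member in order. */
	#define PHYREGS(bw1, bw2, bw3, bw4, bw5, bw6)				\
		.phy_regs = { bw1, bw2, bw3, bw4, bw5, bw6 }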
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 78016ae21c5..86bc0a0f735 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -28,23 +28,8 @@
28/* Returns TRUE, if the radio is enabled in hardware. */ 28/* Returns TRUE, if the radio is enabled in hardware. */
29bool b43_is_hw_radio_enabled(struct b43_wldev *dev) 29bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
30{ 30{
31 if (dev->phy.rev >= 3 || dev->phy.type == B43_PHYTYPE_LP) { 31 return !(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI)
32 if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI) 32 & B43_MMIO_RADIO_HWENABLED_HI_MASK);
33 & B43_MMIO_RADIO_HWENABLED_HI_MASK))
34 return 1;
35 } else {
36 /* To prevent CPU fault on PPC, do not read a register
37 * unless the interface is started; however, on resume
38 * for hibernation, this routine is entered early. When
39 * that happens, unconditionally return TRUE.
40 */
41 if (b43_status(dev) < B43_STAT_STARTED)
42 return 1;
43 if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
44 & B43_MMIO_RADIO_HWENABLED_LO_MASK)
45 return 1;
46 }
47 return 0;
48} 33}
49 34
50/* The poll callback for the hardware button. */ 35/* The poll callback for the hardware button. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 67f18ecdb3b..1f11e1670bf 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -181,52 +181,75 @@ static int b43legacy_ratelimit(struct b43legacy_wl *wl)
181 181
182void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...) 182void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...)
183{ 183{
184 struct va_format vaf;
184 va_list args; 185 va_list args;
185 186
186 if (!b43legacy_ratelimit(wl)) 187 if (!b43legacy_ratelimit(wl))
187 return; 188 return;
189
188 va_start(args, fmt); 190 va_start(args, fmt);
189 printk(KERN_INFO "b43legacy-%s: ", 191
190 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 192 vaf.fmt = fmt;
191 vprintk(fmt, args); 193 vaf.va = &args;
194
195 printk(KERN_INFO "b43legacy-%s: %pV",
196 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
197
192 va_end(args); 198 va_end(args);
193} 199}
194 200
195void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...) 201void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...)
196{ 202{
203 struct va_format vaf;
197 va_list args; 204 va_list args;
198 205
199 if (!b43legacy_ratelimit(wl)) 206 if (!b43legacy_ratelimit(wl))
200 return; 207 return;
208
201 va_start(args, fmt); 209 va_start(args, fmt);
202 printk(KERN_ERR "b43legacy-%s ERROR: ", 210
203 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 211 vaf.fmt = fmt;
204 vprintk(fmt, args); 212 vaf.va = &args;
213
214 printk(KERN_ERR "b43legacy-%s ERROR: %pV",
215 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
216
205 va_end(args); 217 va_end(args);
206} 218}
207 219
208void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...) 220void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...)
209{ 221{
222 struct va_format vaf;
210 va_list args; 223 va_list args;
211 224
212 if (!b43legacy_ratelimit(wl)) 225 if (!b43legacy_ratelimit(wl))
213 return; 226 return;
227
214 va_start(args, fmt); 228 va_start(args, fmt);
215 printk(KERN_WARNING "b43legacy-%s warning: ", 229
216 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 230 vaf.fmt = fmt;
217 vprintk(fmt, args); 231 vaf.va = &args;
232
233 printk(KERN_WARNING "b43legacy-%s warning: %pV",
234 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
235
218 va_end(args); 236 va_end(args);
219} 237}
220 238
221#if B43legacy_DEBUG 239#if B43legacy_DEBUG
222void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...) 240void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...)
223{ 241{
242 struct va_format vaf;
224 va_list args; 243 va_list args;
225 244
226 va_start(args, fmt); 245 va_start(args, fmt);
227 printk(KERN_DEBUG "b43legacy-%s debug: ", 246
228 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 247 vaf.fmt = fmt;
229 vprintk(fmt, args); 248 vaf.va = &args;
249
250 printk(KERN_DEBUG "b43legacy-%s debug: %pV",
251 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
252
230 va_end(args); 253 va_end(args);
231} 254}
232#endif /* DEBUG */ 255#endif /* DEBUG */
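All four wrappers above switch from a prefix printk() followed by vprintk() to a single printk() that prints the caller's arguments through a struct va_format and the %pV specifier, so the prefix and the message can no longer be split apart in the log by a competing CPU. A minimal sketch of the idiom outside this driver (driver name and function are made up):

#include <linux/kernel.h>

void my_drv_info(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	/* one printk call, one line in the log, prefix included */
	printk(KERN_INFO "my_drv: %pV", &vaf);

	va_end(args);
}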
diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c
index d579df72b78..b90f223fb31 100644
--- a/drivers/net/wireless/b43legacy/rfkill.c
+++ b/drivers/net/wireless/b43legacy/rfkill.c
@@ -29,7 +29,7 @@
29/* Returns TRUE, if the radio is enabled in hardware. */ 29/* Returns TRUE, if the radio is enabled in hardware. */
30bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev) 30bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
31{ 31{
32 if (dev->phy.rev >= 3) { 32 if (dev->dev->id.revision >= 3) {
33 if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI) 33 if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI)
34 & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK)) 34 & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK))
35 return 1; 35 return 1;
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index b82364258dc..ed424574160 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -106,6 +106,9 @@ config IWL5000
106 Intel WiFi Link 1000BGN 106 Intel WiFi Link 1000BGN
107 Intel Wireless WiFi 5150AGN 107 Intel Wireless WiFi 5150AGN
108 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN 108 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
109 Intel 6000 Gen 2 Series Wi-Fi Adapters (6000G2A and 6000G2B)
110 Intel Wireless WiFi Link 6050BGN Gen 2 Adapter
111 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
109 112
110config IWL3945 113config IWL3945
111 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" 114 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 63edbe2e557..93380f97835 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -2,20 +2,27 @@ obj-$(CONFIG_IWLWIFI) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o 2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o 3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o
4iwlcore-objs += iwl-scan.o iwl-led.o 4iwlcore-objs += iwl-scan.o iwl-led.o
5iwlcore-$(CONFIG_IWL3945) += iwl-legacy.o
6iwlcore-$(CONFIG_IWL4965) += iwl-legacy.o
5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 7iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
6iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 8iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
7 9
10# If 3945 is selected only, iwl-legacy.o will be added
11# to iwlcore-m above, but it needs to be built in.
12iwlcore-objs += $(iwlcore-m)
13
8CFLAGS_iwl-devtrace.o := -I$(src) 14CFLAGS_iwl-devtrace.o := -I$(src)
9 15
10# AGN 16# AGN
11obj-$(CONFIG_IWLAGN) += iwlagn.o 17obj-$(CONFIG_IWLAGN) += iwlagn.o
12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o 18iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o 19iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
14iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o 20iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
15iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o 21iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
16iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o 22iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
17 23
18iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 24iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
25iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
19iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 26iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
20iwlagn-$(CONFIG_IWL5000) += iwl-6000.o 27iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
21iwlagn-$(CONFIG_IWL5000) += iwl-1000.o 28iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index db540910b11..3100a72b9b4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -211,14 +211,16 @@ static struct iwl_lib_ops iwl1000_lib = {
211 .calib_version = iwlagn_eeprom_calib_version, 211 .calib_version = iwlagn_eeprom_calib_version,
212 .query_addr = iwlagn_eeprom_query_addr, 212 .query_addr = iwlagn_eeprom_query_addr,
213 }, 213 },
214 .post_associate = iwl_post_associate, 214 .isr_ops = {
215 .isr = iwl_isr_ict, 215 .isr = iwl_isr_ict,
216 .config_ap = iwl_config_ap, 216 .free = iwl_free_isr_ict,
217 .alloc = iwl_alloc_isr_ict,
218 .reset = iwl_reset_ict,
219 .disable = iwl_disable_ict,
220 },
217 .temp_ops = { 221 .temp_ops = {
218 .temperature = iwlagn_temperature, 222 .temperature = iwlagn_temperature,
219 }, 223 },
220 .manage_ibss_station = iwlagn_manage_ibss_station,
221 .update_bcast_stations = iwl_update_bcast_stations,
222 .debugfs_ops = { 224 .debugfs_ops = {
223 .rx_stats_read = iwl_ucode_rx_stats_read, 225 .rx_stats_read = iwl_ucode_rx_stats_read,
224 .tx_stats_read = iwl_ucode_tx_stats_read, 226 .tx_stats_read = iwl_ucode_tx_stats_read,
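The single .isr hook above becomes a small isr_ops bundle so the ICT-capable NICs can also provide alloc/free/reset/disable callbacks next to the handler. A sketch of that kind of function-pointer bundle (type and member signatures below are illustrative, not the driver's exact definitions):

struct my_dev;				/* stand-in for the driver's private struct */

struct my_isr_ops {
	int  (*isr)(struct my_dev *dev);	/* top-half interrupt handler */
	int  (*alloc)(struct my_dev *dev);	/* set up the ICT table, if any */
	void (*free)(struct my_dev *dev);
	void (*reset)(struct my_dev *dev);
	void (*disable)(struct my_dev *dev);
};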
@@ -243,6 +245,7 @@ static const struct iwl_ops iwl1000_ops = {
243 .hcmd = &iwlagn_hcmd, 245 .hcmd = &iwlagn_hcmd,
244 .utils = &iwlagn_hcmd_utils, 246 .utils = &iwlagn_hcmd_utils,
245 .led = &iwlagn_led_ops, 247 .led = &iwlagn_led_ops,
248 .ieee80211_ops = &iwlagn_hw_ops,
246}; 249};
247 250
248static struct iwl_base_params iwl1000_base_params = { 251static struct iwl_base_params iwl1000_base_params = {
@@ -275,7 +278,6 @@ struct iwl_cfg iwl1000_bgn_cfg = {
275 .fw_name_pre = IWL1000_FW_PRE, 278 .fw_name_pre = IWL1000_FW_PRE,
276 .ucode_api_max = IWL1000_UCODE_API_MAX, 279 .ucode_api_max = IWL1000_UCODE_API_MAX,
277 .ucode_api_min = IWL1000_UCODE_API_MIN, 280 .ucode_api_min = IWL1000_UCODE_API_MIN,
278 .sku = IWL_SKU_G|IWL_SKU_N,
279 .valid_tx_ant = ANT_A, 281 .valid_tx_ant = ANT_A,
280 .valid_rx_ant = ANT_AB, 282 .valid_rx_ant = ANT_AB,
281 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, 283 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
@@ -284,6 +286,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
284 .mod_params = &iwlagn_mod_params, 286 .mod_params = &iwlagn_mod_params,
285 .base_params = &iwl1000_base_params, 287 .base_params = &iwl1000_base_params,
286 .ht_params = &iwl1000_ht_params, 288 .ht_params = &iwl1000_ht_params,
289 .led_mode = IWL_LED_BLINK,
287}; 290};
288 291
289struct iwl_cfg iwl1000_bg_cfg = { 292struct iwl_cfg iwl1000_bg_cfg = {
@@ -291,7 +294,6 @@ struct iwl_cfg iwl1000_bg_cfg = {
291 .fw_name_pre = IWL1000_FW_PRE, 294 .fw_name_pre = IWL1000_FW_PRE,
292 .ucode_api_max = IWL1000_UCODE_API_MAX, 295 .ucode_api_max = IWL1000_UCODE_API_MAX,
293 .ucode_api_min = IWL1000_UCODE_API_MIN, 296 .ucode_api_min = IWL1000_UCODE_API_MIN,
294 .sku = IWL_SKU_G,
295 .valid_tx_ant = ANT_A, 297 .valid_tx_ant = ANT_A,
296 .valid_rx_ant = ANT_AB, 298 .valid_rx_ant = ANT_AB,
297 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, 299 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
@@ -299,6 +301,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
299 .ops = &iwl1000_ops, 301 .ops = &iwl1000_ops,
300 .mod_params = &iwlagn_mod_params, 302 .mod_params = &iwlagn_mod_params,
301 .base_params = &iwl1000_base_params, 303 .base_params = &iwl1000_base_params,
304 .led_mode = IWL_LED_BLINK,
302}; 305};
303 306
304struct iwl_cfg iwl100_bgn_cfg = { 307struct iwl_cfg iwl100_bgn_cfg = {
@@ -306,7 +309,6 @@ struct iwl_cfg iwl100_bgn_cfg = {
306 .fw_name_pre = IWL100_FW_PRE, 309 .fw_name_pre = IWL100_FW_PRE,
307 .ucode_api_max = IWL100_UCODE_API_MAX, 310 .ucode_api_max = IWL100_UCODE_API_MAX,
308 .ucode_api_min = IWL100_UCODE_API_MIN, 311 .ucode_api_min = IWL100_UCODE_API_MIN,
309 .sku = IWL_SKU_G|IWL_SKU_N,
310 .valid_tx_ant = ANT_A, 312 .valid_tx_ant = ANT_A,
311 .valid_rx_ant = ANT_A, 313 .valid_rx_ant = ANT_A,
312 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, 314 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
@@ -315,6 +317,7 @@ struct iwl_cfg iwl100_bgn_cfg = {
315 .mod_params = &iwlagn_mod_params, 317 .mod_params = &iwlagn_mod_params,
316 .base_params = &iwl1000_base_params, 318 .base_params = &iwl1000_base_params,
317 .ht_params = &iwl1000_ht_params, 319 .ht_params = &iwl1000_ht_params,
320 .led_mode = IWL_LED_RF_STATE,
318}; 321};
319 322
320struct iwl_cfg iwl100_bg_cfg = { 323struct iwl_cfg iwl100_bg_cfg = {
@@ -322,7 +325,6 @@ struct iwl_cfg iwl100_bg_cfg = {
322 .fw_name_pre = IWL100_FW_PRE, 325 .fw_name_pre = IWL100_FW_PRE,
323 .ucode_api_max = IWL100_UCODE_API_MAX, 326 .ucode_api_max = IWL100_UCODE_API_MAX,
324 .ucode_api_min = IWL100_UCODE_API_MIN, 327 .ucode_api_min = IWL100_UCODE_API_MIN,
325 .sku = IWL_SKU_G,
326 .valid_tx_ant = ANT_A, 328 .valid_tx_ant = ANT_A,
327 .valid_rx_ant = ANT_A, 329 .valid_rx_ant = ANT_A,
328 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, 330 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
@@ -330,6 +332,7 @@ struct iwl_cfg iwl100_bg_cfg = {
330 .ops = &iwl1000_ops, 332 .ops = &iwl1000_ops,
331 .mod_params = &iwlagn_mod_params, 333 .mod_params = &iwlagn_mod_params,
332 .base_params = &iwl1000_base_params, 334 .base_params = &iwl1000_base_params,
335 .led_mode = IWL_LED_RF_STATE,
333}; 336};
334 337
335MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); 338MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 176e5257767..d39f449a9bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -51,6 +51,7 @@
51#include "iwl-led.h" 51#include "iwl-led.h"
52#include "iwl-3945-led.h" 52#include "iwl-3945-led.h"
53#include "iwl-3945-debugfs.h" 53#include "iwl-3945-debugfs.h"
54#include "iwl-legacy.h"
54 55
55#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ 56#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
56 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 57 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -115,7 +116,7 @@ void iwl3945_disable_events(struct iwl_priv *priv)
115 u32 base; /* SRAM address of event log header */ 116 u32 base; /* SRAM address of event log header */
116 u32 disable_ptr; /* SRAM address of event-disable bitmap array */ 117 u32 disable_ptr; /* SRAM address of event-disable bitmap array */
117 u32 array_size; /* # of u32 entries in array */ 118 u32 array_size; /* # of u32 entries in array */
118 u32 evt_disable[IWL_EVT_DISABLE_SIZE] = { 119 static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
119 0x00000000, /* 31 - 0 Event id numbers */ 120 0x00000000, /* 31 - 0 Event id numbers */
120 0x00000000, /* 63 - 32 */ 121 0x00000000, /* 63 - 32 */
121 0x00000000, /* 95 - 64 */ 122 0x00000000, /* 95 - 64 */
@@ -296,7 +297,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
296 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && 297 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
297 (txq_id != IWL39_CMD_QUEUE_NUM) && 298 (txq_id != IWL39_CMD_QUEUE_NUM) &&
298 priv->mac80211_registered) 299 priv->mac80211_registered)
299 iwl_wake_queue(priv, txq_id); 300 iwl_wake_queue(priv, txq);
300} 301}
301 302
302/** 303/**
@@ -1451,6 +1452,10 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1451 }; 1452 };
1452 u16 chan; 1453 u16 chan;
1453 1454
1455 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1456 "TX Power requested while scanning!\n"))
1457 return -EAGAIN;
1458
1454 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel); 1459 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
1455 1460
1456 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1; 1461 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
@@ -2722,10 +2727,9 @@ static struct iwl_lib_ops iwl3945_lib = {
2722 }, 2727 },
2723 .send_tx_power = iwl3945_send_tx_power, 2728 .send_tx_power = iwl3945_send_tx_power,
2724 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr, 2729 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
2725 .post_associate = iwl3945_post_associate, 2730 .isr_ops = {
2726 .isr = iwl_isr_legacy, 2731 .isr = iwl_isr_legacy,
2727 .config_ap = iwl3945_config_ap, 2732 },
2728 .manage_ibss_station = iwl3945_manage_ibss_station,
2729 .recover_from_tx_stall = iwl_bg_monitor_recover, 2733 .recover_from_tx_stall = iwl_bg_monitor_recover,
2730 .check_plcp_health = iwl3945_good_plcp_health, 2734 .check_plcp_health = iwl3945_good_plcp_health,
2731 2735
@@ -2736,10 +2740,16 @@ static struct iwl_lib_ops iwl3945_lib = {
2736 }, 2740 },
2737}; 2741};
2738 2742
2743static const struct iwl_legacy_ops iwl3945_legacy_ops = {
2744 .post_associate = iwl3945_post_associate,
2745 .config_ap = iwl3945_config_ap,
2746 .manage_ibss_station = iwl3945_manage_ibss_station,
2747};
2748
2739static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2749static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2740 .get_hcmd_size = iwl3945_get_hcmd_size, 2750 .get_hcmd_size = iwl3945_get_hcmd_size,
2741 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2751 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2742 .tx_cmd_protection = iwlcore_tx_cmd_protection, 2752 .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
2743 .request_scan = iwl3945_request_scan, 2753 .request_scan = iwl3945_request_scan,
2744 .post_scan = iwl3945_post_scan, 2754 .post_scan = iwl3945_post_scan,
2745}; 2755};
@@ -2749,6 +2759,8 @@ static const struct iwl_ops iwl3945_ops = {
2749 .hcmd = &iwl3945_hcmd, 2759 .hcmd = &iwl3945_hcmd,
2750 .utils = &iwl3945_hcmd_utils, 2760 .utils = &iwl3945_hcmd_utils,
2751 .led = &iwl3945_led_ops, 2761 .led = &iwl3945_led_ops,
2762 .legacy = &iwl3945_legacy_ops,
2763 .ieee80211_ops = &iwl3945_hw_ops,
2752}; 2764};
2753 2765
2754static struct iwl_base_params iwl3945_base_params = { 2766static struct iwl_base_params iwl3945_base_params = {
@@ -2776,6 +2788,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2776 .ops = &iwl3945_ops, 2788 .ops = &iwl3945_ops,
2777 .mod_params = &iwl3945_mod_params, 2789 .mod_params = &iwl3945_mod_params,
2778 .base_params = &iwl3945_base_params, 2790 .base_params = &iwl3945_base_params,
2791 .led_mode = IWL_LED_BLINK,
2779}; 2792};
2780 2793
2781static struct iwl_cfg iwl3945_abg_cfg = { 2794static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2788,6 +2801,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2788 .ops = &iwl3945_ops, 2801 .ops = &iwl3945_ops,
2789 .mod_params = &iwl3945_mod_params, 2802 .mod_params = &iwl3945_mod_params,
2790 .base_params = &iwl3945_base_params, 2803 .base_params = &iwl3945_base_params,
2804 .led_mode = IWL_LED_BLINK,
2791}; 2805};
2792 2806
2793DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { 2807DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 09391f0ee61..3eef1eb74a7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -264,10 +264,8 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
264 struct iwl_rx_mem_buffer *rxb); 264 struct iwl_rx_mem_buffer *rxb);
265extern void iwl3945_disable_events(struct iwl_priv *priv); 265extern void iwl3945_disable_events(struct iwl_priv *priv);
266extern int iwl4965_get_temperature(const struct iwl_priv *priv); 266extern int iwl4965_get_temperature(const struct iwl_priv *priv);
267extern void iwl3945_post_associate(struct iwl_priv *priv, 267extern void iwl3945_post_associate(struct iwl_priv *priv);
268 struct ieee80211_vif *vif); 268extern void iwl3945_config_ap(struct iwl_priv *priv);
269extern void iwl3945_config_ap(struct iwl_priv *priv,
270 struct ieee80211_vif *vif);
271 269
272extern int iwl3945_commit_rxon(struct iwl_priv *priv, 270extern int iwl3945_commit_rxon(struct iwl_priv *priv,
273 struct iwl_rxon_context *ctx); 271 struct iwl_rxon_context *ctx);
@@ -282,6 +280,8 @@ extern int iwl3945_commit_rxon(struct iwl_priv *priv,
282 */ 280 */
283extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid); 281extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
284 282
283extern struct ieee80211_ops iwl3945_hw_ops;
284
285/* 285/*
286 * Forward declare iwl-3945.c functions for iwl-base.c 286 * Forward declare iwl-3945.c functions for iwl-base.c
287 */ 287 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index b207e3e9299..6788ceb3768 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -48,6 +48,7 @@
48#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
49#include "iwl-agn.h" 49#include "iwl-agn.h"
50#include "iwl-agn-debugfs.h" 50#include "iwl-agn-debugfs.h"
51#include "iwl-legacy.h"
51 52
52static int iwl4965_send_tx_power(struct iwl_priv *priv); 53static int iwl4965_send_tx_power(struct iwl_priv *priv);
53static int iwl4965_hw_get_temperature(struct iwl_priv *priv); 54static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -1377,13 +1378,9 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1377 u8 ctrl_chan_high = 0; 1378 u8 ctrl_chan_high = 0;
1378 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 1379 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1379 1380
1380 if (test_bit(STATUS_SCANNING, &priv->status)) { 1381 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1381 /* If this gets hit a lot, switch it to a BUG() and catch 1382 "TX Power requested while scanning!\n"))
1382 * the stack trace to find out who is calling this during
1383 * a scan. */
1384 IWL_WARN(priv, "TX Power requested while scanning!\n");
1385 return -EAGAIN; 1383 return -EAGAIN;
1386 }
1387 1384
1388 band = priv->band == IEEE80211_BAND_2GHZ; 1385 band = priv->band == IEEE80211_BAND_2GHZ;
1389 1386
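Both send_tx_power() paths now collapse the open-coded "warn and bail out" branch into a WARN_ONCE() guard, which returns the condition and prints a one-shot backtrace identifying the offending caller - exactly the information the deleted comment said it wanted. A standalone sketch of the pattern (status bit and function are placeholders):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static int example_send_cmd(unsigned long *status, int scan_bit)
{
	/* WARN_ONCE(cond, ...) evaluates to cond, so it can gate the early
	 * return and, the first time it fires, dump a stack trace. */
	if (WARN_ONCE(test_bit(scan_bit, status),
		      "command requested while scanning!\n"))
		return -EAGAIN;

	return 0;	/* proceed with the real work */
}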
@@ -1447,6 +1444,142 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1447 return ret; 1444 return ret;
1448} 1445}
1449 1446
1447static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1448{
1449 /* cast away the const for active_rxon in this function */
1450 struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
1451 int ret;
1452 bool new_assoc =
1453 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1454
1455 if (!iwl_is_alive(priv))
1456 return -EBUSY;
1457
1458 if (!ctx->is_active)
1459 return 0;
1460
1461 /* always get timestamp with Rx frame */
1462 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1463
1464 ret = iwl_check_rxon_cmd(priv, ctx);
1465 if (ret) {
1466 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1467 return -EINVAL;
1468 }
1469
1470 /*
1471 * receive commit_rxon request
1472 * abort any previous channel switch if still in progress
1473 */
1474 if (priv->switch_rxon.switch_in_progress &&
1475 (priv->switch_rxon.channel != ctx->staging.channel)) {
1476 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1477 le16_to_cpu(priv->switch_rxon.channel));
1478 iwl_chswitch_done(priv, false);
1479 }
1480
1481 /* If we don't need to send a full RXON, we can use
1482 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1483 * and other flags for the current radio configuration. */
1484 if (!iwl_full_rxon_required(priv, ctx)) {
1485 ret = iwl_send_rxon_assoc(priv, ctx);
1486 if (ret) {
1487 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
1488 return ret;
1489 }
1490
1491 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1492 iwl_print_rx_config_cmd(priv, ctx);
1493 return 0;
1494 }
1495
1496 /* If we are currently associated and the new config requires
1497 * an RXON_ASSOC and the new config wants the associated mask enabled,
1498 * we must clear the associated bit from the active configuration
1499 * before we apply the new config */
1500 if (iwl_is_associated_ctx(ctx) && new_assoc) {
1501 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1502 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1503
1504 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
1505 sizeof(struct iwl_rxon_cmd),
1506 active_rxon);
1507
1508 /* If the mask clearing failed then we set
1509 * active_rxon back to what it was previously */
1510 if (ret) {
1511 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1512 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
1513 return ret;
1514 }
1515 iwl_clear_ucode_stations(priv, ctx);
1516 iwl_restore_stations(priv, ctx);
1517 ret = iwl_restore_default_wep_keys(priv, ctx);
1518 if (ret) {
1519 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1520 return ret;
1521 }
1522 }
1523
1524 IWL_DEBUG_INFO(priv, "Sending RXON\n"
1525 "* with%s RXON_FILTER_ASSOC_MSK\n"
1526 "* channel = %d\n"
1527 "* bssid = %pM\n",
1528 (new_assoc ? "" : "out"),
1529 le16_to_cpu(ctx->staging.channel),
1530 ctx->staging.bssid_addr);
1531
1532 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
1533
1534 /* Apply the new configuration
1535 * RXON unassoc clears the station table in uCode so restoration of
1536 * stations is needed after it (the RXON command) completes
1537 */
1538 if (!new_assoc) {
1539 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
1540 sizeof(struct iwl_rxon_cmd), &ctx->staging);
1541 if (ret) {
1542 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1543 return ret;
1544 }
1545 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
1546 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1547 iwl_clear_ucode_stations(priv, ctx);
1548 iwl_restore_stations(priv, ctx);
1549 ret = iwl_restore_default_wep_keys(priv, ctx);
1550 if (ret) {
1551 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1552 return ret;
1553 }
1554 }
1555 if (new_assoc) {
1556 priv->start_calib = 0;
1557 /* Apply the new configuration
1558 * RXON assoc doesn't clear the station table in uCode,
1559 */
1560 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
1561 sizeof(struct iwl_rxon_cmd), &ctx->staging);
1562 if (ret) {
1563 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1564 return ret;
1565 }
1566 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1567 }
1568 iwl_print_rx_config_cmd(priv, ctx);
1569
1570 iwl_init_sensitivity(priv);
1571
1572 /* If we issue a new RXON command which required a tune then we must
1573 * send a new TXPOWER command or we won't be able to Tx any frames */
1574 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
1575 if (ret) {
1576 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
1577 return ret;
1578 }
1579
1580 return 0;
1581}
1582
1450static int iwl4965_hw_channel_switch(struct iwl_priv *priv, 1583static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1451 struct ieee80211_channel_switch *ch_switch) 1584 struct ieee80211_channel_switch *ch_switch)
1452{ 1585{
@@ -1554,22 +1687,6 @@ static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
1554} 1687}
1555 1688
1556/** 1689/**
1557 * sign_extend - Sign extend a value using specified bit as sign-bit
1558 *
1559 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
1560 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
1561 *
1562 * @param oper value to sign extend
1563 * @param index 0 based bit index (0<=index<32) to sign bit
1564 */
1565static s32 sign_extend(u32 oper, int index)
1566{
1567 u8 shift = 31 - index;
1568
1569 return (s32)(oper << shift) >> shift;
1570}
1571
1572/**
1573 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin) 1690 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
1574 * @statistics: Provides the temperature reading from the uCode 1691 * @statistics: Provides the temperature reading from the uCode
1575 * 1692 *
@@ -1606,9 +1723,9 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1606 * "initialize" ALIVE response. 1723 * "initialize" ALIVE response.
1607 */ 1724 */
1608 if (!test_bit(STATUS_TEMPERATURE, &priv->status)) 1725 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1609 vt = sign_extend(R4, 23); 1726 vt = sign_extend32(R4, 23);
1610 else 1727 else
1611 vt = sign_extend(le32_to_cpu(priv->_agn.statistics. 1728 vt = sign_extend32(le32_to_cpu(priv->_agn.statistics.
1612 general.common.temperature), 23); 1729 general.common.temperature), 23);
1613 1730
1614 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); 1731 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
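The local sign_extend() helper goes away in favour of the generic sign_extend32() from <linux/bitops.h>; both rely on the same shift trick. A userspace-compilable reconstruction of the removed helper, for reference (renamed so as not to imply it is the kernel's exact implementation):

#include <stdint.h>

static int32_t sign_extend_bit(uint32_t value, int sign_bit_index)
{
	int shift = 31 - sign_bit_index;

	/* move the chosen sign bit up to bit 31, then arithmetic-shift
	 * it back down so it fills all of the upper bits */
	return (int32_t)(value << shift) >> shift;
}

/* sign_extend_bit(9, 3) == -7, matching the kerneldoc of the old helper */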
@@ -2121,12 +2238,8 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2121 2238
2122 if (priv->mac80211_registered && 2239 if (priv->mac80211_registered &&
2123 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 2240 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
2124 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { 2241 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
2125 if (agg->state == IWL_AGG_OFF) 2242 iwl_wake_queue(priv, txq);
2126 iwl_wake_queue(priv, txq_id);
2127 else
2128 iwl_wake_queue(priv, txq->swq_id);
2129 }
2130 } 2243 }
2131 } else { 2244 } else {
2132 info->status.rates[0].count = tx_resp->failure_frame + 1; 2245 info->status.rates[0].count = tx_resp->failure_frame + 1;
@@ -2150,7 +2263,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2150 2263
2151 if (priv->mac80211_registered && 2264 if (priv->mac80211_registered &&
2152 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 2265 (iwl_queue_space(&txq->q) > txq->q.low_mark))
2153 iwl_wake_queue(priv, txq_id); 2266 iwl_wake_queue(priv, txq);
2154 } 2267 }
2155 if (qc && likely(sta_id != IWL_INVALID_STATION)) 2268 if (qc && likely(sta_id != IWL_INVALID_STATION))
2156 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); 2269 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
@@ -2216,7 +2329,7 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
2216 2329
2217static struct iwl_hcmd_ops iwl4965_hcmd = { 2330static struct iwl_hcmd_ops iwl4965_hcmd = {
2218 .rxon_assoc = iwl4965_send_rxon_assoc, 2331 .rxon_assoc = iwl4965_send_rxon_assoc,
2219 .commit_rxon = iwlagn_commit_rxon, 2332 .commit_rxon = iwl4965_commit_rxon,
2220 .set_rxon_chain = iwlagn_set_rxon_chain, 2333 .set_rxon_chain = iwlagn_set_rxon_chain,
2221 .send_bt_config = iwl_send_bt_config, 2334 .send_bt_config = iwl_send_bt_config,
2222}; 2335};
@@ -2233,12 +2346,155 @@ static void iwl4965_post_scan(struct iwl_priv *priv)
2233 iwlcore_commit_rxon(priv, ctx); 2346 iwlcore_commit_rxon(priv, ctx);
2234} 2347}
2235 2348
2349static void iwl4965_post_associate(struct iwl_priv *priv)
2350{
2351 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2352 struct ieee80211_vif *vif = ctx->vif;
2353 struct ieee80211_conf *conf = NULL;
2354 int ret = 0;
2355
2356 if (!vif || !priv->is_open)
2357 return;
2358
2359 if (vif->type == NL80211_IFTYPE_AP) {
2360 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
2361 return;
2362 }
2363
2364 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2365 return;
2366
2367 iwl_scan_cancel_timeout(priv, 200);
2368
2369 conf = ieee80211_get_hw_conf(priv->hw);
2370
2371 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2372 iwlcore_commit_rxon(priv, ctx);
2373
2374 ret = iwl_send_rxon_timing(priv, ctx);
2375 if (ret)
2376 IWL_WARN(priv, "RXON timing - "
2377 "Attempting to continue.\n");
2378
2379 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2380
2381 iwl_set_rxon_ht(priv, &priv->current_ht_config);
2382
2383 if (priv->cfg->ops->hcmd->set_rxon_chain)
2384 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2385
2386 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
2387
2388 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
2389 vif->bss_conf.aid, vif->bss_conf.beacon_int);
2390
2391 if (vif->bss_conf.use_short_preamble)
2392 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2393 else
2394 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2395
2396 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2397 if (vif->bss_conf.use_short_slot)
2398 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
2399 else
2400 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2401 }
2402
2403 iwlcore_commit_rxon(priv, ctx);
2404
2405 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2406 vif->bss_conf.aid, ctx->active.bssid_addr);
2407
2408 switch (vif->type) {
2409 case NL80211_IFTYPE_STATION:
2410 break;
2411 case NL80211_IFTYPE_ADHOC:
2412 iwlagn_send_beacon_cmd(priv);
2413 break;
2414 default:
2415 IWL_ERR(priv, "%s Should not be called in %d mode\n",
2416 __func__, vif->type);
2417 break;
2418 }
2419
2420 /* the chain noise calibration will enable PM upon completion
2421 * If chain noise has already been run, then we need to enable
2422 * power management here */
2423 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
2424 iwl_power_update_mode(priv, false);
2425
2426 /* Enable Rx differential gain and sensitivity calibrations */
2427 iwl_chain_noise_reset(priv);
2428 priv->start_calib = 1;
2429}
2430
2431static void iwl4965_config_ap(struct iwl_priv *priv)
2432{
2433 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2434 struct ieee80211_vif *vif = ctx->vif;
2435 int ret = 0;
2436
2437 lockdep_assert_held(&priv->mutex);
2438
2439 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2440 return;
2441
2442 /* The following should be done only at AP bring up */
2443 if (!iwl_is_associated_ctx(ctx)) {
2444
2445 /* RXON - unassoc (to set timing command) */
2446 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2447 iwlcore_commit_rxon(priv, ctx);
2448
2449 /* RXON Timing */
2450 ret = iwl_send_rxon_timing(priv, ctx);
2451 if (ret)
2452 IWL_WARN(priv, "RXON timing failed - "
2453 "Attempting to continue.\n");
2454
2455 /* AP has all antennas */
2456 priv->chain_noise_data.active_chains =
2457 priv->hw_params.valid_rx_ant;
2458 iwl_set_rxon_ht(priv, &priv->current_ht_config);
2459 if (priv->cfg->ops->hcmd->set_rxon_chain)
2460 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2461
2462 ctx->staging.assoc_id = 0;
2463
2464 if (vif->bss_conf.use_short_preamble)
2465 ctx->staging.flags |=
2466 RXON_FLG_SHORT_PREAMBLE_MSK;
2467 else
2468 ctx->staging.flags &=
2469 ~RXON_FLG_SHORT_PREAMBLE_MSK;
2470
2471 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2472 if (vif->bss_conf.use_short_slot)
2473 ctx->staging.flags |=
2474 RXON_FLG_SHORT_SLOT_MSK;
2475 else
2476 ctx->staging.flags &=
2477 ~RXON_FLG_SHORT_SLOT_MSK;
2478 }
2479 /* need to send beacon cmd before committing assoc RXON! */
2480 iwlagn_send_beacon_cmd(priv);
2481 /* restore RXON assoc */
2482 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2483 iwlcore_commit_rxon(priv, ctx);
2484 }
2485 iwlagn_send_beacon_cmd(priv);
2486
2487 /* FIXME - we need to add code here to detect a totally new
2488 * configuration, reset the AP, unassoc, rxon timing, assoc,
2489 * clear sta table, add BCAST sta... */
2490}
2491
2236static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { 2492static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2237 .get_hcmd_size = iwl4965_get_hcmd_size, 2493 .get_hcmd_size = iwl4965_get_hcmd_size,
2238 .build_addsta_hcmd = iwl4965_build_addsta_hcmd, 2494 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2239 .chain_noise_reset = iwl4965_chain_noise_reset, 2495 .chain_noise_reset = iwl4965_chain_noise_reset,
2240 .gain_computation = iwl4965_gain_computation, 2496 .gain_computation = iwl4965_gain_computation,
2241 .tx_cmd_protection = iwlcore_tx_cmd_protection, 2497 .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
2242 .calc_rssi = iwl4965_calc_rssi, 2498 .calc_rssi = iwl4965_calc_rssi,
2243 .request_scan = iwlagn_request_scan, 2499 .request_scan = iwlagn_request_scan,
2244 .post_scan = iwl4965_post_scan, 2500 .post_scan = iwl4965_post_scan,
@@ -2285,14 +2541,12 @@ static struct iwl_lib_ops iwl4965_lib = {
2285 }, 2541 },
2286 .send_tx_power = iwl4965_send_tx_power, 2542 .send_tx_power = iwl4965_send_tx_power,
2287 .update_chain_flags = iwl_update_chain_flags, 2543 .update_chain_flags = iwl_update_chain_flags,
2288 .post_associate = iwl_post_associate, 2544 .isr_ops = {
2289 .config_ap = iwl_config_ap, 2545 .isr = iwl_isr_legacy,
2290 .isr = iwl_isr_legacy, 2546 },
2291 .temp_ops = { 2547 .temp_ops = {
2292 .temperature = iwl4965_temperature_calib, 2548 .temperature = iwl4965_temperature_calib,
2293 }, 2549 },
2294 .manage_ibss_station = iwlagn_manage_ibss_station,
2295 .update_bcast_stations = iwl_update_bcast_stations,
2296 .debugfs_ops = { 2550 .debugfs_ops = {
2297 .rx_stats_read = iwl_ucode_rx_stats_read, 2551 .rx_stats_read = iwl_ucode_rx_stats_read,
2298 .tx_stats_read = iwl_ucode_tx_stats_read, 2552 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -2304,11 +2558,43 @@ static struct iwl_lib_ops iwl4965_lib = {
2304 .check_plcp_health = iwl_good_plcp_health, 2558 .check_plcp_health = iwl_good_plcp_health,
2305}; 2559};
2306 2560
2561static const struct iwl_legacy_ops iwl4965_legacy_ops = {
2562 .post_associate = iwl4965_post_associate,
2563 .config_ap = iwl4965_config_ap,
2564 .manage_ibss_station = iwlagn_manage_ibss_station,
2565 .update_bcast_stations = iwl_update_bcast_stations,
2566};
2567
2568struct ieee80211_ops iwl4965_hw_ops = {
2569 .tx = iwlagn_mac_tx,
2570 .start = iwlagn_mac_start,
2571 .stop = iwlagn_mac_stop,
2572 .add_interface = iwl_mac_add_interface,
2573 .remove_interface = iwl_mac_remove_interface,
2574 .change_interface = iwl_mac_change_interface,
2575 .config = iwl_legacy_mac_config,
2576 .configure_filter = iwlagn_configure_filter,
2577 .set_key = iwlagn_mac_set_key,
2578 .update_tkip_key = iwlagn_mac_update_tkip_key,
2579 .conf_tx = iwl_mac_conf_tx,
2580 .reset_tsf = iwl_legacy_mac_reset_tsf,
2581 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
2582 .ampdu_action = iwlagn_mac_ampdu_action,
2583 .hw_scan = iwl_mac_hw_scan,
2584 .sta_add = iwlagn_mac_sta_add,
2585 .sta_remove = iwl_mac_sta_remove,
2586 .channel_switch = iwlagn_mac_channel_switch,
2587 .flush = iwlagn_mac_flush,
2588 .tx_last_beacon = iwl_mac_tx_last_beacon,
2589};
2590
2307static const struct iwl_ops iwl4965_ops = { 2591static const struct iwl_ops iwl4965_ops = {
2308 .lib = &iwl4965_lib, 2592 .lib = &iwl4965_lib,
2309 .hcmd = &iwl4965_hcmd, 2593 .hcmd = &iwl4965_hcmd,
2310 .utils = &iwl4965_hcmd_utils, 2594 .utils = &iwl4965_hcmd_utils,
2311 .led = &iwlagn_led_ops, 2595 .led = &iwlagn_led_ops,
2596 .legacy = &iwl4965_legacy_ops,
2597 .ieee80211_ops = &iwl4965_hw_ops,
2312}; 2598};
2313 2599
2314static struct iwl_base_params iwl4965_base_params = { 2600static struct iwl_base_params iwl4965_base_params = {
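The new per-flavour ieee80211_ops tables (iwl3945_hw_ops, iwl4965_hw_ops, iwlagn_hw_ops) exist because a mac80211 driver hands exactly one such table to ieee80211_alloc_hw() when it creates its device; splitting them lets each hardware family register only the callbacks it actually implements. Minimal sketch of that registration step (the private struct and ops table are placeholders):

#include <net/mac80211.h>

struct my_priv { int dummy; };			/* placeholder private state */
static const struct ieee80211_ops my_hw_ops;	/* filled in like the tables above */

static struct ieee80211_hw *example_alloc_hw(void)
{
	/* mac80211 keeps the ops pointer rather than copying the table,
	 * so it must stay valid for the lifetime of the hw - hence the
	 * file-scope definitions in the driver. */
	return ieee80211_alloc_hw(sizeof(struct my_priv), &my_hw_ops);
}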
@@ -2330,6 +2616,7 @@ static struct iwl_base_params iwl4965_base_params = {
2330 .ucode_tracing = true, 2616 .ucode_tracing = true,
2331 .sensitivity_calib_by_driver = true, 2617 .sensitivity_calib_by_driver = true,
2332 .chain_noise_calib_by_driver = true, 2618 .chain_noise_calib_by_driver = true,
2619 .no_agg_framecnt_info = true,
2333}; 2620};
2334 2621
2335struct iwl_cfg iwl4965_agn_cfg = { 2622struct iwl_cfg iwl4965_agn_cfg = {
@@ -2337,7 +2624,6 @@ struct iwl_cfg iwl4965_agn_cfg = {
2337 .fw_name_pre = IWL4965_FW_PRE, 2624 .fw_name_pre = IWL4965_FW_PRE,
2338 .ucode_api_max = IWL4965_UCODE_API_MAX, 2625 .ucode_api_max = IWL4965_UCODE_API_MAX,
2339 .ucode_api_min = IWL4965_UCODE_API_MIN, 2626 .ucode_api_min = IWL4965_UCODE_API_MIN,
2340 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
2341 .valid_tx_ant = ANT_AB, 2627 .valid_tx_ant = ANT_AB,
2342 .valid_rx_ant = ANT_ABC, 2628 .valid_rx_ant = ANT_ABC,
2343 .eeprom_ver = EEPROM_4965_EEPROM_VERSION, 2629 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
@@ -2345,6 +2631,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
2345 .ops = &iwl4965_ops, 2631 .ops = &iwl4965_ops,
2346 .mod_params = &iwlagn_mod_params, 2632 .mod_params = &iwlagn_mod_params,
2347 .base_params = &iwl4965_base_params, 2633 .base_params = &iwl4965_base_params,
2634 .led_mode = IWL_LED_BLINK,
2348 /* 2635 /*
2349 * Force use of chains B and C for scan RX on 5 GHz band 2636 * Force use of chains B and C for scan RX on 5 GHz band
2350 * because the device has off-channel reception on chain A. 2637 * because the device has off-channel reception on chain A.
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index fd9fbc93ea1..3ee0f7c035c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -385,14 +385,16 @@ static struct iwl_lib_ops iwl5000_lib = {
385 .calib_version = iwlagn_eeprom_calib_version, 385 .calib_version = iwlagn_eeprom_calib_version,
386 .query_addr = iwlagn_eeprom_query_addr, 386 .query_addr = iwlagn_eeprom_query_addr,
387 }, 387 },
388 .post_associate = iwl_post_associate, 388 .isr_ops = {
389 .isr = iwl_isr_ict, 389 .isr = iwl_isr_ict,
390 .config_ap = iwl_config_ap, 390 .free = iwl_free_isr_ict,
391 .alloc = iwl_alloc_isr_ict,
392 .reset = iwl_reset_ict,
393 .disable = iwl_disable_ict,
394 },
391 .temp_ops = { 395 .temp_ops = {
392 .temperature = iwlagn_temperature, 396 .temperature = iwlagn_temperature,
393 }, 397 },
394 .manage_ibss_station = iwlagn_manage_ibss_station,
395 .update_bcast_stations = iwl_update_bcast_stations,
396 .debugfs_ops = { 398 .debugfs_ops = {
397 .rx_stats_read = iwl_ucode_rx_stats_read, 399 .rx_stats_read = iwl_ucode_rx_stats_read,
398 .tx_stats_read = iwl_ucode_tx_stats_read, 400 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -453,14 +455,16 @@ static struct iwl_lib_ops iwl5150_lib = {
453 .calib_version = iwlagn_eeprom_calib_version, 455 .calib_version = iwlagn_eeprom_calib_version,
454 .query_addr = iwlagn_eeprom_query_addr, 456 .query_addr = iwlagn_eeprom_query_addr,
455 }, 457 },
456 .post_associate = iwl_post_associate, 458 .isr_ops = {
457 .isr = iwl_isr_ict, 459 .isr = iwl_isr_ict,
458 .config_ap = iwl_config_ap, 460 .free = iwl_free_isr_ict,
461 .alloc = iwl_alloc_isr_ict,
462 .reset = iwl_reset_ict,
463 .disable = iwl_disable_ict,
464 },
459 .temp_ops = { 465 .temp_ops = {
460 .temperature = iwl5150_temperature, 466 .temperature = iwl5150_temperature,
461 }, 467 },
462 .manage_ibss_station = iwlagn_manage_ibss_station,
463 .update_bcast_stations = iwl_update_bcast_stations,
464 .debugfs_ops = { 468 .debugfs_ops = {
465 .rx_stats_read = iwl_ucode_rx_stats_read, 469 .rx_stats_read = iwl_ucode_rx_stats_read,
466 .tx_stats_read = iwl_ucode_tx_stats_read, 470 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -485,6 +489,7 @@ static const struct iwl_ops iwl5000_ops = {
485 .hcmd = &iwlagn_hcmd, 489 .hcmd = &iwlagn_hcmd,
486 .utils = &iwlagn_hcmd_utils, 490 .utils = &iwlagn_hcmd_utils,
487 .led = &iwlagn_led_ops, 491 .led = &iwlagn_led_ops,
492 .ieee80211_ops = &iwlagn_hw_ops,
488}; 493};
489 494
490static const struct iwl_ops iwl5150_ops = { 495static const struct iwl_ops iwl5150_ops = {
@@ -492,6 +497,7 @@ static const struct iwl_ops iwl5150_ops = {
492 .hcmd = &iwlagn_hcmd, 497 .hcmd = &iwlagn_hcmd,
493 .utils = &iwlagn_hcmd_utils, 498 .utils = &iwlagn_hcmd_utils,
494 .led = &iwlagn_led_ops, 499 .led = &iwlagn_led_ops,
500 .ieee80211_ops = &iwlagn_hw_ops,
495}; 501};
496 502
497static struct iwl_base_params iwl5000_base_params = { 503static struct iwl_base_params iwl5000_base_params = {
@@ -521,7 +527,6 @@ struct iwl_cfg iwl5300_agn_cfg = {
521 .fw_name_pre = IWL5000_FW_PRE, 527 .fw_name_pre = IWL5000_FW_PRE,
522 .ucode_api_max = IWL5000_UCODE_API_MAX, 528 .ucode_api_max = IWL5000_UCODE_API_MAX,
523 .ucode_api_min = IWL5000_UCODE_API_MIN, 529 .ucode_api_min = IWL5000_UCODE_API_MIN,
524 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
525 .valid_tx_ant = ANT_ABC, 530 .valid_tx_ant = ANT_ABC,
526 .valid_rx_ant = ANT_ABC, 531 .valid_rx_ant = ANT_ABC,
527 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 532 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
@@ -530,6 +535,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
530 .mod_params = &iwlagn_mod_params, 535 .mod_params = &iwlagn_mod_params,
531 .base_params = &iwl5000_base_params, 536 .base_params = &iwl5000_base_params,
532 .ht_params = &iwl5000_ht_params, 537 .ht_params = &iwl5000_ht_params,
538 .led_mode = IWL_LED_BLINK,
533}; 539};
534 540
535struct iwl_cfg iwl5100_bgn_cfg = { 541struct iwl_cfg iwl5100_bgn_cfg = {
@@ -537,7 +543,6 @@ struct iwl_cfg iwl5100_bgn_cfg = {
537 .fw_name_pre = IWL5000_FW_PRE, 543 .fw_name_pre = IWL5000_FW_PRE,
538 .ucode_api_max = IWL5000_UCODE_API_MAX, 544 .ucode_api_max = IWL5000_UCODE_API_MAX,
539 .ucode_api_min = IWL5000_UCODE_API_MIN, 545 .ucode_api_min = IWL5000_UCODE_API_MIN,
540 .sku = IWL_SKU_G|IWL_SKU_N,
541 .valid_tx_ant = ANT_B, 546 .valid_tx_ant = ANT_B,
542 .valid_rx_ant = ANT_AB, 547 .valid_rx_ant = ANT_AB,
543 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 548 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
@@ -546,6 +551,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
546 .mod_params = &iwlagn_mod_params, 551 .mod_params = &iwlagn_mod_params,
547 .base_params = &iwl5000_base_params, 552 .base_params = &iwl5000_base_params,
548 .ht_params = &iwl5000_ht_params, 553 .ht_params = &iwl5000_ht_params,
554 .led_mode = IWL_LED_BLINK,
549}; 555};
550 556
551struct iwl_cfg iwl5100_abg_cfg = { 557struct iwl_cfg iwl5100_abg_cfg = {
@@ -553,7 +559,6 @@ struct iwl_cfg iwl5100_abg_cfg = {
553 .fw_name_pre = IWL5000_FW_PRE, 559 .fw_name_pre = IWL5000_FW_PRE,
554 .ucode_api_max = IWL5000_UCODE_API_MAX, 560 .ucode_api_max = IWL5000_UCODE_API_MAX,
555 .ucode_api_min = IWL5000_UCODE_API_MIN, 561 .ucode_api_min = IWL5000_UCODE_API_MIN,
556 .sku = IWL_SKU_A|IWL_SKU_G,
557 .valid_tx_ant = ANT_B, 562 .valid_tx_ant = ANT_B,
558 .valid_rx_ant = ANT_AB, 563 .valid_rx_ant = ANT_AB,
559 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 564 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
@@ -561,6 +566,7 @@ struct iwl_cfg iwl5100_abg_cfg = {
561 .ops = &iwl5000_ops, 566 .ops = &iwl5000_ops,
562 .mod_params = &iwlagn_mod_params, 567 .mod_params = &iwlagn_mod_params,
563 .base_params = &iwl5000_base_params, 568 .base_params = &iwl5000_base_params,
569 .led_mode = IWL_LED_BLINK,
564}; 570};
565 571
566struct iwl_cfg iwl5100_agn_cfg = { 572struct iwl_cfg iwl5100_agn_cfg = {
@@ -568,7 +574,6 @@ struct iwl_cfg iwl5100_agn_cfg = {
568 .fw_name_pre = IWL5000_FW_PRE, 574 .fw_name_pre = IWL5000_FW_PRE,
569 .ucode_api_max = IWL5000_UCODE_API_MAX, 575 .ucode_api_max = IWL5000_UCODE_API_MAX,
570 .ucode_api_min = IWL5000_UCODE_API_MIN, 576 .ucode_api_min = IWL5000_UCODE_API_MIN,
571 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
572 .valid_tx_ant = ANT_B, 577 .valid_tx_ant = ANT_B,
573 .valid_rx_ant = ANT_AB, 578 .valid_rx_ant = ANT_AB,
574 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 579 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
@@ -577,6 +582,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
577 .mod_params = &iwlagn_mod_params, 582 .mod_params = &iwlagn_mod_params,
578 .base_params = &iwl5000_base_params, 583 .base_params = &iwl5000_base_params,
579 .ht_params = &iwl5000_ht_params, 584 .ht_params = &iwl5000_ht_params,
585 .led_mode = IWL_LED_BLINK,
580}; 586};
581 587
582struct iwl_cfg iwl5350_agn_cfg = { 588struct iwl_cfg iwl5350_agn_cfg = {
@@ -584,7 +590,6 @@ struct iwl_cfg iwl5350_agn_cfg = {
584 .fw_name_pre = IWL5000_FW_PRE, 590 .fw_name_pre = IWL5000_FW_PRE,
585 .ucode_api_max = IWL5000_UCODE_API_MAX, 591 .ucode_api_max = IWL5000_UCODE_API_MAX,
586 .ucode_api_min = IWL5000_UCODE_API_MIN, 592 .ucode_api_min = IWL5000_UCODE_API_MIN,
587 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
588 .valid_tx_ant = ANT_ABC, 593 .valid_tx_ant = ANT_ABC,
589 .valid_rx_ant = ANT_ABC, 594 .valid_rx_ant = ANT_ABC,
590 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 595 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
@@ -593,6 +598,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
593 .mod_params = &iwlagn_mod_params, 598 .mod_params = &iwlagn_mod_params,
594 .base_params = &iwl5000_base_params, 599 .base_params = &iwl5000_base_params,
595 .ht_params = &iwl5000_ht_params, 600 .ht_params = &iwl5000_ht_params,
601 .led_mode = IWL_LED_BLINK,
596}; 602};
597 603
598struct iwl_cfg iwl5150_agn_cfg = { 604struct iwl_cfg iwl5150_agn_cfg = {
@@ -600,7 +606,6 @@ struct iwl_cfg iwl5150_agn_cfg = {
600 .fw_name_pre = IWL5150_FW_PRE, 606 .fw_name_pre = IWL5150_FW_PRE,
601 .ucode_api_max = IWL5150_UCODE_API_MAX, 607 .ucode_api_max = IWL5150_UCODE_API_MAX,
602 .ucode_api_min = IWL5150_UCODE_API_MIN, 608 .ucode_api_min = IWL5150_UCODE_API_MIN,
603 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
604 .valid_tx_ant = ANT_A, 609 .valid_tx_ant = ANT_A,
605 .valid_rx_ant = ANT_AB, 610 .valid_rx_ant = ANT_AB,
606 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 611 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
@@ -610,6 +615,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
610 .base_params = &iwl5000_base_params, 615 .base_params = &iwl5000_base_params,
611 .ht_params = &iwl5000_ht_params, 616 .ht_params = &iwl5000_ht_params,
612 .need_dc_calib = true, 617 .need_dc_calib = true,
618 .led_mode = IWL_LED_BLINK,
613}; 619};
614 620
615struct iwl_cfg iwl5150_abg_cfg = { 621struct iwl_cfg iwl5150_abg_cfg = {
@@ -617,7 +623,6 @@ struct iwl_cfg iwl5150_abg_cfg = {
617 .fw_name_pre = IWL5150_FW_PRE, 623 .fw_name_pre = IWL5150_FW_PRE,
618 .ucode_api_max = IWL5150_UCODE_API_MAX, 624 .ucode_api_max = IWL5150_UCODE_API_MAX,
619 .ucode_api_min = IWL5150_UCODE_API_MIN, 625 .ucode_api_min = IWL5150_UCODE_API_MIN,
620 .sku = IWL_SKU_A|IWL_SKU_G,
621 .valid_tx_ant = ANT_A, 626 .valid_tx_ant = ANT_A,
622 .valid_rx_ant = ANT_AB, 627 .valid_rx_ant = ANT_AB,
623 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 628 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
@@ -626,6 +631,7 @@ struct iwl_cfg iwl5150_abg_cfg = {
626 .mod_params = &iwlagn_mod_params, 631 .mod_params = &iwlagn_mod_params,
627 .base_params = &iwl5000_base_params, 632 .base_params = &iwl5000_base_params,
628 .need_dc_calib = true, 633 .need_dc_calib = true,
634 .led_mode = IWL_LED_BLINK,
629}; 635};
630 636
631MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); 637MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 11e6532fc57..93e3fe92f38 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -53,13 +53,11 @@
53#define IWL6000_UCODE_API_MAX 4 53#define IWL6000_UCODE_API_MAX 4
54#define IWL6050_UCODE_API_MAX 5 54#define IWL6050_UCODE_API_MAX 5
55#define IWL6000G2_UCODE_API_MAX 5 55#define IWL6000G2_UCODE_API_MAX 5
56#define IWL130_UCODE_API_MAX 5
57 56
58/* Lowest firmware API version supported */ 57/* Lowest firmware API version supported */
59#define IWL6000_UCODE_API_MIN 4 58#define IWL6000_UCODE_API_MIN 4
60#define IWL6050_UCODE_API_MIN 4 59#define IWL6050_UCODE_API_MIN 4
61#define IWL6000G2_UCODE_API_MIN 4 60#define IWL6000G2_UCODE_API_MIN 4
62#define IWL130_UCODE_API_MIN 5
63 61
64#define IWL6000_FW_PRE "iwlwifi-6000-" 62#define IWL6000_FW_PRE "iwlwifi-6000-"
65#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" 63#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -77,10 +75,6 @@
77#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode" 75#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
78#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api) 76#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
79 77
80#define IWL130_FW_PRE "iwlwifi-130-"
81#define _IWL130_MODULE_FIRMWARE(api) IWL130_FW_PRE #api ".ucode"
82#define IWL130_MODULE_FIRMWARE(api) _IWL130_MODULE_FIRMWARE(api)
83
84static void iwl6000_set_ct_threshold(struct iwl_priv *priv) 78static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
85{ 79{
86 /* want Celsius */ 80 /* want Celsius */
@@ -328,14 +322,16 @@ static struct iwl_lib_ops iwl6000_lib = {
328 .query_addr = iwlagn_eeprom_query_addr, 322 .query_addr = iwlagn_eeprom_query_addr,
329 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 323 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
330 }, 324 },
331 .post_associate = iwl_post_associate, 325 .isr_ops = {
332 .isr = iwl_isr_ict, 326 .isr = iwl_isr_ict,
333 .config_ap = iwl_config_ap, 327 .free = iwl_free_isr_ict,
328 .alloc = iwl_alloc_isr_ict,
329 .reset = iwl_reset_ict,
330 .disable = iwl_disable_ict,
331 },
334 .temp_ops = { 332 .temp_ops = {
335 .temperature = iwlagn_temperature, 333 .temperature = iwlagn_temperature,
336 }, 334 },
337 .manage_ibss_station = iwlagn_manage_ibss_station,
338 .update_bcast_stations = iwl_update_bcast_stations,
339 .debugfs_ops = { 335 .debugfs_ops = {
340 .rx_stats_read = iwl_ucode_rx_stats_read, 336 .rx_stats_read = iwl_ucode_rx_stats_read,
341 .tx_stats_read = iwl_ucode_tx_stats_read, 337 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -399,14 +395,16 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
399 .query_addr = iwlagn_eeprom_query_addr, 395 .query_addr = iwlagn_eeprom_query_addr,
400 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 396 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
401 }, 397 },
402 .post_associate = iwl_post_associate, 398 .isr_ops = {
403 .isr = iwl_isr_ict, 399 .isr = iwl_isr_ict,
404 .config_ap = iwl_config_ap, 400 .free = iwl_free_isr_ict,
401 .alloc = iwl_alloc_isr_ict,
402 .reset = iwl_reset_ict,
403 .disable = iwl_disable_ict,
404 },
405 .temp_ops = { 405 .temp_ops = {
406 .temperature = iwlagn_temperature, 406 .temperature = iwlagn_temperature,
407 }, 407 },
408 .manage_ibss_station = iwlagn_manage_ibss_station,
409 .update_bcast_stations = iwl_update_bcast_stations,
410 .debugfs_ops = { 408 .debugfs_ops = {
411 .rx_stats_read = iwl_ucode_rx_stats_read, 409 .rx_stats_read = iwl_ucode_rx_stats_read,
412 .tx_stats_read = iwl_ucode_tx_stats_read, 410 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -439,6 +437,7 @@ static const struct iwl_ops iwl6000_ops = {
439 .hcmd = &iwlagn_hcmd, 437 .hcmd = &iwlagn_hcmd,
440 .utils = &iwlagn_hcmd_utils, 438 .utils = &iwlagn_hcmd_utils,
441 .led = &iwlagn_led_ops, 439 .led = &iwlagn_led_ops,
440 .ieee80211_ops = &iwlagn_hw_ops,
442}; 441};
443 442
444static const struct iwl_ops iwl6050_ops = { 443static const struct iwl_ops iwl6050_ops = {
@@ -447,6 +446,7 @@ static const struct iwl_ops iwl6050_ops = {
447 .utils = &iwlagn_hcmd_utils, 446 .utils = &iwlagn_hcmd_utils,
448 .led = &iwlagn_led_ops, 447 .led = &iwlagn_led_ops,
449 .nic = &iwl6050_nic_ops, 448 .nic = &iwl6050_nic_ops,
449 .ieee80211_ops = &iwlagn_hw_ops,
450}; 450};
451 451
452static const struct iwl_ops iwl6050g2_ops = { 452static const struct iwl_ops iwl6050g2_ops = {
@@ -455,6 +455,7 @@ static const struct iwl_ops iwl6050g2_ops = {
455 .utils = &iwlagn_hcmd_utils, 455 .utils = &iwlagn_hcmd_utils,
456 .led = &iwlagn_led_ops, 456 .led = &iwlagn_led_ops,
457 .nic = &iwl6050g2_nic_ops, 457 .nic = &iwl6050g2_nic_ops,
458 .ieee80211_ops = &iwlagn_hw_ops,
458}; 459};
459 460
460static const struct iwl_ops iwl6000g2b_ops = { 461static const struct iwl_ops iwl6000g2b_ops = {
@@ -462,6 +463,7 @@ static const struct iwl_ops iwl6000g2b_ops = {
462 .hcmd = &iwlagn_bt_hcmd, 463 .hcmd = &iwlagn_bt_hcmd,
463 .utils = &iwlagn_hcmd_utils, 464 .utils = &iwlagn_hcmd_utils,
464 .led = &iwlagn_led_ops, 465 .led = &iwlagn_led_ops,
466 .ieee80211_ops = &iwlagn_hw_ops,
465}; 467};
466 468
467static struct iwl_base_params iwl6000_base_params = { 469static struct iwl_base_params iwl6000_base_params = {
@@ -485,6 +487,7 @@ static struct iwl_base_params iwl6000_base_params = {
485 .ucode_tracing = true, 487 .ucode_tracing = true,
486 .sensitivity_calib_by_driver = true, 488 .sensitivity_calib_by_driver = true,
487 .chain_noise_calib_by_driver = true, 489 .chain_noise_calib_by_driver = true,
490 .shadow_reg_enable = true,
488}; 491};
489 492
490static struct iwl_base_params iwl6050_base_params = { 493static struct iwl_base_params iwl6050_base_params = {
@@ -508,6 +511,7 @@ static struct iwl_base_params iwl6050_base_params = {
508 .ucode_tracing = true, 511 .ucode_tracing = true,
509 .sensitivity_calib_by_driver = true, 512 .sensitivity_calib_by_driver = true,
510 .chain_noise_calib_by_driver = true, 513 .chain_noise_calib_by_driver = true,
514 .shadow_reg_enable = true,
511}; 515};
512static struct iwl_base_params iwl6000_coex_base_params = { 516static struct iwl_base_params iwl6000_coex_base_params = {
513 .eeprom_size = OTP_LOW_IMAGE_SIZE, 517 .eeprom_size = OTP_LOW_IMAGE_SIZE,
@@ -530,6 +534,7 @@ static struct iwl_base_params iwl6000_coex_base_params = {
530 .ucode_tracing = true, 534 .ucode_tracing = true,
531 .sensitivity_calib_by_driver = true, 535 .sensitivity_calib_by_driver = true,
532 .chain_noise_calib_by_driver = true, 536 .chain_noise_calib_by_driver = true,
537 .shadow_reg_enable = true,
533}; 538};
534 539
535static struct iwl_ht_params iwl6000_ht_params = { 540static struct iwl_ht_params iwl6000_ht_params = {
@@ -541,8 +546,10 @@ static struct iwl_bt_params iwl6000_bt_params = {
541 .bt_statistics = true, 546 .bt_statistics = true,
542 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 547 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
543 .advanced_bt_coexist = true, 548 .advanced_bt_coexist = true,
549 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
544 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, 550 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
545 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT, 551 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
552 .bt_sco_disable = true,
546}; 553};
547 554
548struct iwl_cfg iwl6000g2a_2agn_cfg = { 555struct iwl_cfg iwl6000g2a_2agn_cfg = {
@@ -550,7 +557,6 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
550 .fw_name_pre = IWL6000G2A_FW_PRE, 557 .fw_name_pre = IWL6000G2A_FW_PRE,
551 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 558 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
552 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 559 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
553 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
554 .valid_tx_ant = ANT_AB, 560 .valid_tx_ant = ANT_AB,
555 .valid_rx_ant = ANT_AB, 561 .valid_rx_ant = ANT_AB,
556 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 562 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
@@ -561,6 +567,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
561 .ht_params = &iwl6000_ht_params, 567 .ht_params = &iwl6000_ht_params,
562 .need_dc_calib = true, 568 .need_dc_calib = true,
563 .need_temp_offset_calib = true, 569 .need_temp_offset_calib = true,
570 .led_mode = IWL_LED_RF_STATE,
564}; 571};
565 572
566struct iwl_cfg iwl6000g2a_2abg_cfg = { 573struct iwl_cfg iwl6000g2a_2abg_cfg = {
@@ -568,7 +575,6 @@ struct iwl_cfg iwl6000g2a_2abg_cfg = {
568 .fw_name_pre = IWL6000G2A_FW_PRE, 575 .fw_name_pre = IWL6000G2A_FW_PRE,
569 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 576 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
570 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 577 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
571 .sku = IWL_SKU_A|IWL_SKU_G,
572 .valid_tx_ant = ANT_AB, 578 .valid_tx_ant = ANT_AB,
573 .valid_rx_ant = ANT_AB, 579 .valid_rx_ant = ANT_AB,
574 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 580 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
@@ -578,6 +584,7 @@ struct iwl_cfg iwl6000g2a_2abg_cfg = {
578 .base_params = &iwl6000_base_params, 584 .base_params = &iwl6000_base_params,
579 .need_dc_calib = true, 585 .need_dc_calib = true,
580 .need_temp_offset_calib = true, 586 .need_temp_offset_calib = true,
587 .led_mode = IWL_LED_RF_STATE,
581}; 588};
582 589
583struct iwl_cfg iwl6000g2a_2bg_cfg = { 590struct iwl_cfg iwl6000g2a_2bg_cfg = {
@@ -585,7 +592,6 @@ struct iwl_cfg iwl6000g2a_2bg_cfg = {
585 .fw_name_pre = IWL6000G2A_FW_PRE, 592 .fw_name_pre = IWL6000G2A_FW_PRE,
586 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 593 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
587 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 594 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
588 .sku = IWL_SKU_G,
589 .valid_tx_ant = ANT_AB, 595 .valid_tx_ant = ANT_AB,
590 .valid_rx_ant = ANT_AB, 596 .valid_rx_ant = ANT_AB,
591 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 597 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
@@ -595,6 +601,7 @@ struct iwl_cfg iwl6000g2a_2bg_cfg = {
595 .base_params = &iwl6000_base_params, 601 .base_params = &iwl6000_base_params,
596 .need_dc_calib = true, 602 .need_dc_calib = true,
597 .need_temp_offset_calib = true, 603 .need_temp_offset_calib = true,
604 .led_mode = IWL_LED_RF_STATE,
598}; 605};
599 606
600struct iwl_cfg iwl6000g2b_2agn_cfg = { 607struct iwl_cfg iwl6000g2b_2agn_cfg = {
@@ -602,7 +609,6 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
602 .fw_name_pre = IWL6000G2B_FW_PRE, 609 .fw_name_pre = IWL6000G2B_FW_PRE,
603 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 610 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
604 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 611 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
605 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
606 .valid_tx_ant = ANT_AB, 612 .valid_tx_ant = ANT_AB,
607 .valid_rx_ant = ANT_AB, 613 .valid_rx_ant = ANT_AB,
608 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 614 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
@@ -614,6 +620,8 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
614 .ht_params = &iwl6000_ht_params, 620 .ht_params = &iwl6000_ht_params,
615 .need_dc_calib = true, 621 .need_dc_calib = true,
616 .need_temp_offset_calib = true, 622 .need_temp_offset_calib = true,
623 .led_mode = IWL_LED_RF_STATE,
624 .adv_pm = true,
617 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 625 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
618 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 626 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
619}; 627};
@@ -623,7 +631,6 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
623 .fw_name_pre = IWL6000G2B_FW_PRE, 631 .fw_name_pre = IWL6000G2B_FW_PRE,
624 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 632 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
625 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 633 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
626 .sku = IWL_SKU_A|IWL_SKU_G,
627 .valid_tx_ant = ANT_AB, 634 .valid_tx_ant = ANT_AB,
628 .valid_rx_ant = ANT_AB, 635 .valid_rx_ant = ANT_AB,
629 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 636 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
@@ -634,6 +641,8 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
634 .bt_params = &iwl6000_bt_params, 641 .bt_params = &iwl6000_bt_params,
635 .need_dc_calib = true, 642 .need_dc_calib = true,
636 .need_temp_offset_calib = true, 643 .need_temp_offset_calib = true,
644 .led_mode = IWL_LED_RF_STATE,
645 .adv_pm = true,
637 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 646 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
638 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 647 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
639}; 648};
@@ -643,7 +652,6 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
643 .fw_name_pre = IWL6000G2B_FW_PRE, 652 .fw_name_pre = IWL6000G2B_FW_PRE,
644 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 653 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
645 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 654 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
646 .sku = IWL_SKU_G|IWL_SKU_N,
647 .valid_tx_ant = ANT_AB, 655 .valid_tx_ant = ANT_AB,
648 .valid_rx_ant = ANT_AB, 656 .valid_rx_ant = ANT_AB,
649 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 657 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
@@ -655,6 +663,8 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
655 .ht_params = &iwl6000_ht_params, 663 .ht_params = &iwl6000_ht_params,
656 .need_dc_calib = true, 664 .need_dc_calib = true,
657 .need_temp_offset_calib = true, 665 .need_temp_offset_calib = true,
666 .led_mode = IWL_LED_RF_STATE,
667 .adv_pm = true,
658 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 668 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
659 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 669 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
660}; 670};
@@ -664,7 +674,6 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
664 .fw_name_pre = IWL6000G2B_FW_PRE, 674 .fw_name_pre = IWL6000G2B_FW_PRE,
665 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 675 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
666 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 676 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
667 .sku = IWL_SKU_G,
668 .valid_tx_ant = ANT_AB, 677 .valid_tx_ant = ANT_AB,
669 .valid_rx_ant = ANT_AB, 678 .valid_rx_ant = ANT_AB,
670 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 679 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
@@ -675,6 +684,8 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
675 .bt_params = &iwl6000_bt_params, 684 .bt_params = &iwl6000_bt_params,
676 .need_dc_calib = true, 685 .need_dc_calib = true,
677 .need_temp_offset_calib = true, 686 .need_temp_offset_calib = true,
687 .led_mode = IWL_LED_RF_STATE,
688 .adv_pm = true,
678 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 689 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
679 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 690 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
680}; 691};
@@ -684,7 +695,6 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
684 .fw_name_pre = IWL6000G2B_FW_PRE, 695 .fw_name_pre = IWL6000G2B_FW_PRE,
685 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 696 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
686 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 697 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
687 .sku = IWL_SKU_G|IWL_SKU_N,
688 .valid_tx_ant = ANT_A, 698 .valid_tx_ant = ANT_A,
689 .valid_rx_ant = ANT_AB, 699 .valid_rx_ant = ANT_AB,
690 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 700 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
@@ -696,6 +706,8 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
696 .ht_params = &iwl6000_ht_params, 706 .ht_params = &iwl6000_ht_params,
697 .need_dc_calib = true, 707 .need_dc_calib = true,
698 .need_temp_offset_calib = true, 708 .need_temp_offset_calib = true,
709 .led_mode = IWL_LED_RF_STATE,
710 .adv_pm = true,
699 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 711 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
700 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 712 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
701}; 713};
@@ -705,7 +717,6 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
705 .fw_name_pre = IWL6000G2B_FW_PRE, 717 .fw_name_pre = IWL6000G2B_FW_PRE,
706 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 718 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
707 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 719 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
708 .sku = IWL_SKU_G,
709 .valid_tx_ant = ANT_A, 720 .valid_tx_ant = ANT_A,
710 .valid_rx_ant = ANT_AB, 721 .valid_rx_ant = ANT_AB,
711 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 722 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
@@ -716,6 +727,8 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
716 .bt_params = &iwl6000_bt_params, 727 .bt_params = &iwl6000_bt_params,
717 .need_dc_calib = true, 728 .need_dc_calib = true,
718 .need_temp_offset_calib = true, 729 .need_temp_offset_calib = true,
730 .led_mode = IWL_LED_RF_STATE,
731 .adv_pm = true,
719 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 732 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
720 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 733 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
721}; 734};
@@ -728,7 +741,6 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
728 .fw_name_pre = IWL6000_FW_PRE, 741 .fw_name_pre = IWL6000_FW_PRE,
729 .ucode_api_max = IWL6000_UCODE_API_MAX, 742 .ucode_api_max = IWL6000_UCODE_API_MAX,
730 .ucode_api_min = IWL6000_UCODE_API_MIN, 743 .ucode_api_min = IWL6000_UCODE_API_MIN,
731 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
732 .valid_tx_ant = ANT_BC, 744 .valid_tx_ant = ANT_BC,
733 .valid_rx_ant = ANT_BC, 745 .valid_rx_ant = ANT_BC,
734 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 746 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
@@ -738,6 +750,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
738 .base_params = &iwl6000_base_params, 750 .base_params = &iwl6000_base_params,
739 .ht_params = &iwl6000_ht_params, 751 .ht_params = &iwl6000_ht_params,
740 .pa_type = IWL_PA_INTERNAL, 752 .pa_type = IWL_PA_INTERNAL,
753 .led_mode = IWL_LED_BLINK,
741}; 754};
742 755
743struct iwl_cfg iwl6000i_2abg_cfg = { 756struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -745,7 +758,6 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
745 .fw_name_pre = IWL6000_FW_PRE, 758 .fw_name_pre = IWL6000_FW_PRE,
746 .ucode_api_max = IWL6000_UCODE_API_MAX, 759 .ucode_api_max = IWL6000_UCODE_API_MAX,
747 .ucode_api_min = IWL6000_UCODE_API_MIN, 760 .ucode_api_min = IWL6000_UCODE_API_MIN,
748 .sku = IWL_SKU_A|IWL_SKU_G,
749 .valid_tx_ant = ANT_BC, 761 .valid_tx_ant = ANT_BC,
750 .valid_rx_ant = ANT_BC, 762 .valid_rx_ant = ANT_BC,
751 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 763 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
@@ -754,6 +766,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
754 .mod_params = &iwlagn_mod_params, 766 .mod_params = &iwlagn_mod_params,
755 .base_params = &iwl6000_base_params, 767 .base_params = &iwl6000_base_params,
756 .pa_type = IWL_PA_INTERNAL, 768 .pa_type = IWL_PA_INTERNAL,
769 .led_mode = IWL_LED_BLINK,
757}; 770};
758 771
759struct iwl_cfg iwl6000i_2bg_cfg = { 772struct iwl_cfg iwl6000i_2bg_cfg = {
@@ -761,7 +774,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
761 .fw_name_pre = IWL6000_FW_PRE, 774 .fw_name_pre = IWL6000_FW_PRE,
762 .ucode_api_max = IWL6000_UCODE_API_MAX, 775 .ucode_api_max = IWL6000_UCODE_API_MAX,
763 .ucode_api_min = IWL6000_UCODE_API_MIN, 776 .ucode_api_min = IWL6000_UCODE_API_MIN,
764 .sku = IWL_SKU_G,
765 .valid_tx_ant = ANT_BC, 777 .valid_tx_ant = ANT_BC,
766 .valid_rx_ant = ANT_BC, 778 .valid_rx_ant = ANT_BC,
767 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 779 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
@@ -770,6 +782,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
770 .mod_params = &iwlagn_mod_params, 782 .mod_params = &iwlagn_mod_params,
771 .base_params = &iwl6000_base_params, 783 .base_params = &iwl6000_base_params,
772 .pa_type = IWL_PA_INTERNAL, 784 .pa_type = IWL_PA_INTERNAL,
785 .led_mode = IWL_LED_BLINK,
773}; 786};
774 787
775struct iwl_cfg iwl6050_2agn_cfg = { 788struct iwl_cfg iwl6050_2agn_cfg = {
@@ -777,7 +790,6 @@ struct iwl_cfg iwl6050_2agn_cfg = {
777 .fw_name_pre = IWL6050_FW_PRE, 790 .fw_name_pre = IWL6050_FW_PRE,
778 .ucode_api_max = IWL6050_UCODE_API_MAX, 791 .ucode_api_max = IWL6050_UCODE_API_MAX,
779 .ucode_api_min = IWL6050_UCODE_API_MIN, 792 .ucode_api_min = IWL6050_UCODE_API_MIN,
780 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
781 .valid_tx_ant = ANT_AB, 793 .valid_tx_ant = ANT_AB,
782 .valid_rx_ant = ANT_AB, 794 .valid_rx_ant = ANT_AB,
783 .ops = &iwl6050_ops, 795 .ops = &iwl6050_ops,
@@ -787,6 +799,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
787 .base_params = &iwl6050_base_params, 799 .base_params = &iwl6050_base_params,
788 .ht_params = &iwl6000_ht_params, 800 .ht_params = &iwl6000_ht_params,
789 .need_dc_calib = true, 801 .need_dc_calib = true,
802 .led_mode = IWL_LED_BLINK,
790}; 803};
791 804
792struct iwl_cfg iwl6050g2_bgn_cfg = { 805struct iwl_cfg iwl6050g2_bgn_cfg = {
@@ -794,7 +807,6 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
794 .fw_name_pre = IWL6050_FW_PRE, 807 .fw_name_pre = IWL6050_FW_PRE,
795 .ucode_api_max = IWL6050_UCODE_API_MAX, 808 .ucode_api_max = IWL6050_UCODE_API_MAX,
796 .ucode_api_min = IWL6050_UCODE_API_MIN, 809 .ucode_api_min = IWL6050_UCODE_API_MIN,
797 .sku = IWL_SKU_G|IWL_SKU_N,
798 .valid_tx_ant = ANT_A, 810 .valid_tx_ant = ANT_A,
799 .valid_rx_ant = ANT_AB, 811 .valid_rx_ant = ANT_AB,
800 .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION, 812 .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION,
@@ -804,6 +816,7 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
804 .base_params = &iwl6050_base_params, 816 .base_params = &iwl6050_base_params,
805 .ht_params = &iwl6000_ht_params, 817 .ht_params = &iwl6000_ht_params,
806 .need_dc_calib = true, 818 .need_dc_calib = true,
819 .led_mode = IWL_LED_RF_STATE,
807}; 820};
808 821
809struct iwl_cfg iwl6050_2abg_cfg = { 822struct iwl_cfg iwl6050_2abg_cfg = {
@@ -811,7 +824,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
811 .fw_name_pre = IWL6050_FW_PRE, 824 .fw_name_pre = IWL6050_FW_PRE,
812 .ucode_api_max = IWL6050_UCODE_API_MAX, 825 .ucode_api_max = IWL6050_UCODE_API_MAX,
813 .ucode_api_min = IWL6050_UCODE_API_MIN, 826 .ucode_api_min = IWL6050_UCODE_API_MIN,
814 .sku = IWL_SKU_A|IWL_SKU_G,
815 .valid_tx_ant = ANT_AB, 827 .valid_tx_ant = ANT_AB,
816 .valid_rx_ant = ANT_AB, 828 .valid_rx_ant = ANT_AB,
817 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, 829 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
@@ -820,6 +832,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
820 .mod_params = &iwlagn_mod_params, 832 .mod_params = &iwlagn_mod_params,
821 .base_params = &iwl6050_base_params, 833 .base_params = &iwl6050_base_params,
822 .need_dc_calib = true, 834 .need_dc_calib = true,
835 .led_mode = IWL_LED_BLINK,
823}; 836};
824 837
825struct iwl_cfg iwl6000_3agn_cfg = { 838struct iwl_cfg iwl6000_3agn_cfg = {
@@ -827,7 +840,6 @@ struct iwl_cfg iwl6000_3agn_cfg = {
827 .fw_name_pre = IWL6000_FW_PRE, 840 .fw_name_pre = IWL6000_FW_PRE,
828 .ucode_api_max = IWL6000_UCODE_API_MAX, 841 .ucode_api_max = IWL6000_UCODE_API_MAX,
829 .ucode_api_min = IWL6000_UCODE_API_MIN, 842 .ucode_api_min = IWL6000_UCODE_API_MIN,
830 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
831 .valid_tx_ant = ANT_ABC, 843 .valid_tx_ant = ANT_ABC,
832 .valid_rx_ant = ANT_ABC, 844 .valid_rx_ant = ANT_ABC,
833 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 845 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
@@ -837,14 +849,14 @@ struct iwl_cfg iwl6000_3agn_cfg = {
837 .base_params = &iwl6000_base_params, 849 .base_params = &iwl6000_base_params,
838 .ht_params = &iwl6000_ht_params, 850 .ht_params = &iwl6000_ht_params,
839 .need_dc_calib = true, 851 .need_dc_calib = true,
852 .led_mode = IWL_LED_BLINK,
840}; 853};
841 854
842struct iwl_cfg iwl130_bgn_cfg = { 855struct iwl_cfg iwl130_bgn_cfg = {
843 .name = "Intel(R) 130 Series 1x1 BGN", 856 .name = "Intel(R) 130 Series 1x1 BGN",
844 .fw_name_pre = IWL6000G2B_FW_PRE, 857 .fw_name_pre = IWL6000G2B_FW_PRE,
845 .ucode_api_max = IWL130_UCODE_API_MAX, 858 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
846 .ucode_api_min = IWL130_UCODE_API_MIN, 859 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
847 .sku = IWL_SKU_G|IWL_SKU_N,
848 .valid_tx_ant = ANT_A, 860 .valid_tx_ant = ANT_A,
849 .valid_rx_ant = ANT_A, 861 .valid_rx_ant = ANT_A,
850 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 862 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
@@ -855,6 +867,8 @@ struct iwl_cfg iwl130_bgn_cfg = {
855 .bt_params = &iwl6000_bt_params, 867 .bt_params = &iwl6000_bt_params,
856 .ht_params = &iwl6000_ht_params, 868 .ht_params = &iwl6000_ht_params,
857 .need_dc_calib = true, 869 .need_dc_calib = true,
870 .led_mode = IWL_LED_RF_STATE,
871 .adv_pm = true,
858 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 872 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
859 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 873 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
860}; 874};
@@ -862,9 +876,8 @@ struct iwl_cfg iwl130_bgn_cfg = {
862struct iwl_cfg iwl130_bg_cfg = { 876struct iwl_cfg iwl130_bg_cfg = {
863 .name = "Intel(R) 130 Series 1x2 BG", 877 .name = "Intel(R) 130 Series 1x2 BG",
864 .fw_name_pre = IWL6000G2B_FW_PRE, 878 .fw_name_pre = IWL6000G2B_FW_PRE,
865 .ucode_api_max = IWL130_UCODE_API_MAX, 879 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
866 .ucode_api_min = IWL130_UCODE_API_MIN, 880 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
867 .sku = IWL_SKU_G,
868 .valid_tx_ant = ANT_A, 881 .valid_tx_ant = ANT_A,
869 .valid_rx_ant = ANT_A, 882 .valid_rx_ant = ANT_A,
870 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 883 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
@@ -874,6 +887,8 @@ struct iwl_cfg iwl130_bg_cfg = {
874 .base_params = &iwl6000_coex_base_params, 887 .base_params = &iwl6000_coex_base_params,
875 .bt_params = &iwl6000_bt_params, 888 .bt_params = &iwl6000_bt_params,
876 .need_dc_calib = true, 889 .need_dc_calib = true,
890 .led_mode = IWL_LED_RF_STATE,
891 .adv_pm = true,
877 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 892 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
878 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 893 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
879}; 894};
@@ -882,4 +897,3 @@ MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
882MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); 897MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
883MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 898MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
884MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 899MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
885MODULE_FIRMWARE(IWL130_MODULE_FIRMWARE(IWL130_UCODE_API_MAX));
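The hunks above drop the hardcoded .sku masks from the 6000-series iwl_cfg entries (the SKU is now derived from the EEPROM at probe time, see iwl_eeprom_check_sku further down), and add per-device .led_mode and .adv_pm flags plus .shadow_reg_enable, .agg_time_limit and .bt_sco_disable in the shared base/BT parameter blocks. As a rough illustration of how a base-params boolean like shadow_reg_enable gates a one-time hardware enable (the matching write to CSR_MAC_SHADOW_REG_CTRL appears in iwlagn_hw_nic_init() below), here is a minimal standalone sketch; the register offset is a stand-in, only the 0x800FFFFF value comes from the patch, and none of the names are the driver's real API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SHADOW_REG_CTRL       0x0A8        /* illustrative offset only      */
#define SHADOW_REG_ENABLE_ALL 0x800FFFFFu  /* value used in the hunk below  */

struct base_params {
	bool shadow_reg_enable;
};

static uint32_t fake_mmio[0x1000 / 4];     /* stand-in for device registers */

static void reg_set_bits(uint32_t off, uint32_t bits)
{
	fake_mmio[off / 4] |= bits;
}

static void nic_init(const struct base_params *bp)
{
	/* Only touch the shadow-register control when the config asks for it. */
	if (bp->shadow_reg_enable)
		reg_set_bits(SHADOW_REG_CTRL, SHADOW_REG_ENABLE_ALL);
}

int main(void)
{
	struct base_params bp = { .shadow_reg_enable = true };

	nic_init(&bp);
	printf("shadow ctrl = 0x%08x\n", (unsigned)fake_mmio[SHADOW_REG_CTRL / 4]);
	return 0;
}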
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index e2019e75693..d16bb5ede01 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -732,8 +732,122 @@ static inline u8 find_first_chain(u8 mask)
732 return CHAIN_C; 732 return CHAIN_C;
733} 733}
734 734
735/**
736 * Run disconnected antenna algorithm to find out which antennas are
737 * disconnected.
738 */
739static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
740 struct iwl_chain_noise_data *data)
741{
742 u32 active_chains = 0;
743 u32 max_average_sig;
744 u16 max_average_sig_antenna_i;
745 u8 num_tx_chains;
746 u8 first_chain;
747 u16 i = 0;
748
749 average_sig[0] = data->chain_signal_a /
750 priv->cfg->base_params->chain_noise_num_beacons;
751 average_sig[1] = data->chain_signal_b /
752 priv->cfg->base_params->chain_noise_num_beacons;
753 average_sig[2] = data->chain_signal_c /
754 priv->cfg->base_params->chain_noise_num_beacons;
755
756 if (average_sig[0] >= average_sig[1]) {
757 max_average_sig = average_sig[0];
758 max_average_sig_antenna_i = 0;
759 active_chains = (1 << max_average_sig_antenna_i);
760 } else {
761 max_average_sig = average_sig[1];
762 max_average_sig_antenna_i = 1;
763 active_chains = (1 << max_average_sig_antenna_i);
764 }
765
766 if (average_sig[2] >= max_average_sig) {
767 max_average_sig = average_sig[2];
768 max_average_sig_antenna_i = 2;
769 active_chains = (1 << max_average_sig_antenna_i);
770 }
771
772 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
773 average_sig[0], average_sig[1], average_sig[2]);
774 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
775 max_average_sig, max_average_sig_antenna_i);
776
777 /* Compare signal strengths for all 3 receivers. */
778 for (i = 0; i < NUM_RX_CHAINS; i++) {
779 if (i != max_average_sig_antenna_i) {
780 s32 rssi_delta = (max_average_sig - average_sig[i]);
781
782 /* If signal is very weak, compared with
783 * strongest, mark it as disconnected. */
784 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
785 data->disconn_array[i] = 1;
786 else
787 active_chains |= (1 << i);
788 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
789 "disconn_array[i] = %d\n",
790 i, rssi_delta, data->disconn_array[i]);
791 }
792 }
793
794 /*
795 * The above algorithm sometimes fails when the ucode
796 * reports 0 for all chains. It's not clear why that
797 * happens to start with, but it is then causing trouble
798 * because this can make us enable more chains than the
799 * hardware really has.
800 *
801 * To be safe, simply mask out any chains that we know
802 * are not on the device.
803 */
804 active_chains &= priv->hw_params.valid_rx_ant;
805
806 num_tx_chains = 0;
807 for (i = 0; i < NUM_RX_CHAINS; i++) {
808 /* loops on all the bits of
809 * priv->hw_setting.valid_tx_ant */
810 u8 ant_msk = (1 << i);
811 if (!(priv->hw_params.valid_tx_ant & ant_msk))
812 continue;
813
814 num_tx_chains++;
815 if (data->disconn_array[i] == 0)
816 /* there is a Tx antenna connected */
817 break;
818 if (num_tx_chains == priv->hw_params.tx_chains_num &&
819 data->disconn_array[i]) {
820 /*
821 * If all chains are disconnected
822 * connect the first valid tx chain
823 */
824 first_chain =
825 find_first_chain(priv->cfg->valid_tx_ant);
826 data->disconn_array[first_chain] = 0;
827 active_chains |= BIT(first_chain);
828 IWL_DEBUG_CALIB(priv,
829 "All Tx chains are disconnected W/A - declare %d as connected\n",
830 first_chain);
831 break;
832 }
833 }
834
835 if (active_chains != priv->hw_params.valid_rx_ant &&
836 active_chains != priv->chain_noise_data.active_chains)
837 IWL_DEBUG_CALIB(priv,
838 "Detected that not all antennas are connected! "
839 "Connected: %#x, valid: %#x.\n",
840 active_chains, priv->hw_params.valid_rx_ant);
841
842 /* Save for use within RXON, TX, SCAN commands, etc. */
843 data->active_chains = active_chains;
844 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
845 active_chains);
846}
847
848
735/* 849/*
736 * Accumulate 20 beacons of signal and noise statistics for each of 850 * Accumulate 16 beacons of signal and noise statistics for each of
737 * 3 receivers/antennas/rx-chains, then figure out: 851 * 3 receivers/antennas/rx-chains, then figure out:
738 * 1) Which antennas are connected. 852 * 1) Which antennas are connected.
739 * 2) Differential rx gain settings to balance the 3 receivers. 853 * 2) Differential rx gain settings to balance the 3 receivers.
@@ -750,8 +864,6 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
750 u32 chain_sig_c; 864 u32 chain_sig_c;
751 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; 865 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
752 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; 866 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
753 u32 max_average_sig;
754 u16 max_average_sig_antenna_i;
755 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; 867 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
756 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; 868 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
757 u16 i = 0; 869 u16 i = 0;
@@ -759,11 +871,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
759 u16 stat_chnum = INITIALIZATION_VALUE; 871 u16 stat_chnum = INITIALIZATION_VALUE;
760 u8 rxon_band24; 872 u8 rxon_band24;
761 u8 stat_band24; 873 u8 stat_band24;
762 u32 active_chains = 0;
763 u8 num_tx_chains;
764 unsigned long flags; 874 unsigned long flags;
765 struct statistics_rx_non_phy *rx_info; 875 struct statistics_rx_non_phy *rx_info;
766 u8 first_chain; 876
767 /* 877 /*
768 * MULTI-FIXME: 878 * MULTI-FIXME:
769 * When we support multiple interfaces on different channels, 879 * When we support multiple interfaces on different channels,
@@ -869,108 +979,16 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
869 return; 979 return;
870 980
871 /* Analyze signal for disconnected antenna */ 981 /* Analyze signal for disconnected antenna */
872 average_sig[0] = data->chain_signal_a /
873 priv->cfg->base_params->chain_noise_num_beacons;
874 average_sig[1] = data->chain_signal_b /
875 priv->cfg->base_params->chain_noise_num_beacons;
876 average_sig[2] = data->chain_signal_c /
877 priv->cfg->base_params->chain_noise_num_beacons;
878
879 if (average_sig[0] >= average_sig[1]) {
880 max_average_sig = average_sig[0];
881 max_average_sig_antenna_i = 0;
882 active_chains = (1 << max_average_sig_antenna_i);
883 } else {
884 max_average_sig = average_sig[1];
885 max_average_sig_antenna_i = 1;
886 active_chains = (1 << max_average_sig_antenna_i);
887 }
888
889 if (average_sig[2] >= max_average_sig) {
890 max_average_sig = average_sig[2];
891 max_average_sig_antenna_i = 2;
892 active_chains = (1 << max_average_sig_antenna_i);
893 }
894
895 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
896 average_sig[0], average_sig[1], average_sig[2]);
897 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
898 max_average_sig, max_average_sig_antenna_i);
899
900 /* Compare signal strengths for all 3 receivers. */
901 for (i = 0; i < NUM_RX_CHAINS; i++) {
902 if (i != max_average_sig_antenna_i) {
903 s32 rssi_delta = (max_average_sig - average_sig[i]);
904
905 /* If signal is very weak, compared with
906 * strongest, mark it as disconnected. */
907 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
908 data->disconn_array[i] = 1;
909 else
910 active_chains |= (1 << i);
911 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
912 "disconn_array[i] = %d\n",
913 i, rssi_delta, data->disconn_array[i]);
914 }
915 }
916
917 /*
918 * The above algorithm sometimes fails when the ucode
919 * reports 0 for all chains. It's not clear why that
920 * happens to start with, but it is then causing trouble
921 * because this can make us enable more chains than the
922 * hardware really has.
923 *
924 * To be safe, simply mask out any chains that we know
925 * are not on the device.
926 */
927 if (priv->cfg->bt_params && 982 if (priv->cfg->bt_params &&
928 priv->cfg->bt_params->advanced_bt_coexist && 983 priv->cfg->bt_params->advanced_bt_coexist) {
929 priv->bt_full_concurrent) { 984 /* Disable disconnected antenna algorithm for advanced
930 /* operated as 1x1 in full concurrency mode */ 985 bt coex, assuming valid antennas are connected */
931 active_chains &= first_antenna(priv->hw_params.valid_rx_ant); 986 data->active_chains = priv->hw_params.valid_rx_ant;
987 for (i = 0; i < NUM_RX_CHAINS; i++)
988 if (!(data->active_chains & (1<<i)))
989 data->disconn_array[i] = 1;
932 } else 990 } else
933 active_chains &= priv->hw_params.valid_rx_ant; 991 iwl_find_disconn_antenna(priv, average_sig, data);
934
935 num_tx_chains = 0;
936 for (i = 0; i < NUM_RX_CHAINS; i++) {
937 /* loops on all the bits of
938 * priv->hw_setting.valid_tx_ant */
939 u8 ant_msk = (1 << i);
940 if (!(priv->hw_params.valid_tx_ant & ant_msk))
941 continue;
942
943 num_tx_chains++;
944 if (data->disconn_array[i] == 0)
945 /* there is a Tx antenna connected */
946 break;
947 if (num_tx_chains == priv->hw_params.tx_chains_num &&
948 data->disconn_array[i]) {
949 /*
950 * If all chains are disconnected
951 * connect the first valid tx chain
952 */
953 first_chain =
954 find_first_chain(priv->cfg->valid_tx_ant);
955 data->disconn_array[first_chain] = 0;
956 active_chains |= BIT(first_chain);
957 IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - declare %d as connected\n",
958 first_chain);
959 break;
960 }
961 }
962
963 if (active_chains != priv->hw_params.valid_rx_ant &&
964 active_chains != priv->chain_noise_data.active_chains)
965 IWL_DEBUG_CALIB(priv,
966 "Detected that not all antennas are connected! "
967 "Connected: %#x, valid: %#x.\n",
968 active_chains, priv->hw_params.valid_rx_ant);
969
970 /* Save for use within RXON, TX, SCAN commands, etc. */
971 priv->chain_noise_data.active_chains = active_chains;
972 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
973 active_chains);
974 992
975 /* Analyze noise for rx balance */ 993 /* Analyze noise for rx balance */
976 average_noise[0] = data->chain_noise_a / 994 average_noise[0] = data->chain_noise_a /
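The hunks above factor the disconnected-antenna search out of iwl_chain_noise_calibration() into iwl_find_disconn_antenna(), and skip it entirely when advanced BT coexistence is enabled, in which case every valid RX chain is simply treated as connected. A condensed, self-contained sketch of the heuristic itself follows; the threshold constant is illustrative and stands in for the driver's MAXIMUM_ALLOWED_PATHLOSS:

#include <stdio.h>

#define NUM_CHAINS      3
#define MAX_PATHLOSS_DB 15	/* illustrative value only */

/* Pick the chain with the strongest average signal as the reference, then
 * mark any chain that falls more than the pathloss threshold below it as
 * disconnected; everything else joins the active-chains bitmap. */
static unsigned find_connected_chains(const int avg_sig[NUM_CHAINS],
				      int disconn[NUM_CHAINS])
{
	unsigned active;
	int max = avg_sig[0], max_i = 0, i;

	for (i = 1; i < NUM_CHAINS; i++)
		if (avg_sig[i] >= max) {
			max = avg_sig[i];
			max_i = i;
		}
	active = 1u << max_i;

	for (i = 0; i < NUM_CHAINS; i++) {
		if (i == max_i)
			continue;
		if (max - avg_sig[i] > MAX_PATHLOSS_DB)
			disconn[i] = 1;		/* too weak: assume not wired up */
		else
			active |= 1u << i;
	}
	return active;
}

int main(void)
{
	int sig[NUM_CHAINS] = { 40, 38, 2 };	/* chain C clearly absent */
	int disconn[NUM_CHAINS] = { 0, 0, 0 };

	printf("active chains bitmap: 0x%x\n", find_connected_chains(sig, disconn));
	return 0;
}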
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
index a650baba080..8a4d3acb9b7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -248,6 +248,27 @@ err:
248 248
249} 249}
250 250
251int iwl_eeprom_check_sku(struct iwl_priv *priv)
252{
253 u16 eeprom_sku;
254
255 eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
256
257 priv->cfg->sku = ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
258 EEPROM_SKU_CAP_BAND_POS);
259 if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
260 priv->cfg->sku |= IWL_SKU_N;
261
262 if (!priv->cfg->sku) {
263 IWL_ERR(priv, "Invalid device sku\n");
264 return -EINVAL;
265 }
266
267 IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
268
269 return 0;
270}
271
251void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) 272void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
252{ 273{
253 const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv, 274 const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv,
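The new iwl_eeprom_check_sku() above reads EEPROM_SKU_CAP, fills priv->cfg->sku from the band-selection bits, ORs in IWL_SKU_N when the 11n-enable bit is set, and rejects a device whose SKU decodes to zero. A standalone sketch of that decode is shown below; the mask values, bit positions and SKU_* constants are placeholders, not the real EEPROM_SKU_CAP_* / IWL_SKU_* definitions from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define SKU_A 0x1	/* 5 GHz band,   placeholder value */
#define SKU_G 0x2	/* 2.4 GHz band, placeholder value */
#define SKU_N 0x4	/* 802.11n,      placeholder value */

#define CAP_BAND_MASK  0x0030	/* illustrative layout */
#define CAP_BAND_POS   4
#define CAP_11N_ENABLE 0x0040

static int check_sku(uint16_t eeprom_sku, unsigned *sku_out)
{
	unsigned sku = (eeprom_sku & CAP_BAND_MASK) >> CAP_BAND_POS;

	if (eeprom_sku & CAP_11N_ENABLE)
		sku |= SKU_N;
	if (!sku)
		return -1;	/* device advertises no usable band */
	*sku_out = sku;
	return 0;
}

int main(void)
{
	unsigned sku;

	if (check_sku(0x0070, &sku) == 0)
		printf("Device SKU: 0x%x\n", sku);	/* A|G|N in this sketch */
	return 0;
}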
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index ffb2f4111ad..366340f3fb0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -307,6 +307,7 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
307 307
308 if (ctx_bss->vif && ctx_pan->vif) { 308 if (ctx_bss->vif && ctx_pan->vif) {
309 int bcnint = ctx_pan->vif->bss_conf.beacon_int; 309 int bcnint = ctx_pan->vif->bss_conf.beacon_int;
310 int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
310 311
311 /* should be set, but seems unused?? */ 312 /* should be set, but seems unused?? */
312 cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE); 313 cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
@@ -329,10 +330,10 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
329 if (test_bit(STATUS_SCAN_HW, &priv->status) || 330 if (test_bit(STATUS_SCAN_HW, &priv->status) ||
330 (!ctx_bss->vif->bss_conf.idle && 331 (!ctx_bss->vif->bss_conf.idle &&
331 !ctx_bss->vif->bss_conf.assoc)) { 332 !ctx_bss->vif->bss_conf.assoc)) {
332 slot0 = bcnint * 3 - 20; 333 slot0 = dtim * bcnint * 3 - 20;
333 slot1 = 20; 334 slot1 = 20;
334 } else if (!ctx_pan->vif->bss_conf.idle && 335 } else if (!ctx_pan->vif->bss_conf.idle &&
335 !ctx_pan->vif->bss_conf.assoc) { 336 !ctx_pan->vif->bss_conf.assoc) {
336 slot1 = bcnint * 3 - 20; 337 slot1 = bcnint * 3 - 20;
337 slot0 = 20; 338 slot0 = 20;
338 } 339 }
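The iwlagn_set_pan_params() change above scales the BSS side of the PAN time slicing by the PAN context's DTIM period (defaulting to 1 via the "?:" expression): while scanning or before the BSS is associated, slot0 becomes dtim * bcnint * 3 - 20 TU and the PAN side keeps a fixed 20 TU. For example, with bcnint = 100 and dtim = 2, slot0 = 2 * 100 * 3 - 20 = 580 and slot1 = 20. A tiny sketch of just that arithmetic, with simplified names:

#include <stdio.h>

static void pan_slots(int bcnint, int dtim, int *slot0, int *slot1)
{
	if (!dtim)
		dtim = 1;	/* same default as "dtim_period ?: 1" */
	*slot0 = dtim * bcnint * 3 - 20;
	*slot1 = 20;
}

int main(void)
{
	int s0, s1;

	pan_slots(100, 2, &s0, &s1);
	printf("slot0=%d TU, slot1=%d TU\n", s0, s1);	/* 580, 20 */
	return 0;
}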
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index b555edd5335..f8fe5f44e19 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -445,22 +445,17 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
445 445
446 if (priv->mac80211_registered && 446 if (priv->mac80211_registered &&
447 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 447 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
448 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { 448 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
449 if (agg->state == IWL_AGG_OFF) 449 iwl_wake_queue(priv, txq);
450 iwl_wake_queue(priv, txq_id);
451 else
452 iwl_wake_queue(priv, txq->swq_id);
453 }
454 } 450 }
455 } else { 451 } else {
456 BUG_ON(txq_id != txq->swq_id);
457 iwlagn_set_tx_status(priv, info, tx_resp, txq_id, false); 452 iwlagn_set_tx_status(priv, info, tx_resp, txq_id, false);
458 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 453 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
459 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 454 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
460 455
461 if (priv->mac80211_registered && 456 if (priv->mac80211_registered &&
462 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 457 (iwl_queue_space(&txq->q) > txq->q.low_mark))
463 iwl_wake_queue(priv, txq_id); 458 iwl_wake_queue(priv, txq);
464 } 459 }
465 460
466 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); 461 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
@@ -496,6 +491,10 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
496 struct iwlagn_tx_power_dbm_cmd tx_power_cmd; 491 struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
497 u8 tx_ant_cfg_cmd; 492 u8 tx_ant_cfg_cmd;
498 493
494 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
495 "TX Power requested while scanning!\n"))
496 return -EAGAIN;
497
499 /* half dBm need to multiply */ 498 /* half dBm need to multiply */
500 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); 499 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
501 500
@@ -522,9 +521,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
522 else 521 else
523 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; 522 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
524 523
525 return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd, 524 return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
526 sizeof(tx_power_cmd), &tx_power_cmd, 525 &tx_power_cmd);
527 NULL);
528} 526}
529 527
530void iwlagn_temperature(struct iwl_priv *priv) 528void iwlagn_temperature(struct iwl_priv *priv)
@@ -750,6 +748,12 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv)
750 } else 748 } else
751 iwlagn_txq_ctx_reset(priv); 749 iwlagn_txq_ctx_reset(priv);
752 750
751 if (priv->cfg->base_params->shadow_reg_enable) {
752 /* enable shadow regs in HW */
753 iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
754 0x800FFFFF);
755 }
756
753 set_bit(STATUS_INIT, &priv->status); 757 set_bit(STATUS_INIT, &priv->status);
754 758
755 return 0; 759 return 0;
@@ -1584,22 +1588,6 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1584 return ret; 1588 return ret;
1585} 1589}
1586 1590
1587void iwlagn_post_scan(struct iwl_priv *priv)
1588{
1589 struct iwl_rxon_context *ctx;
1590
1591 /*
1592 * Since setting the RXON may have been deferred while
1593 * performing the scan, fire one off if needed
1594 */
1595 for_each_context(priv, ctx)
1596 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
1597 iwlagn_commit_rxon(priv, ctx);
1598
1599 if (priv->cfg->ops->hcmd->set_pan_params)
1600 priv->cfg->ops->hcmd->set_pan_params(priv);
1601}
1602
1603int iwlagn_manage_ibss_station(struct iwl_priv *priv, 1591int iwlagn_manage_ibss_station(struct iwl_priv *priv,
1604 struct ieee80211_vif *vif, bool add) 1592 struct ieee80211_vif *vif, bool add)
1605{ 1593{
@@ -1841,6 +1829,10 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1841 } else { 1829 } else {
1842 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W << 1830 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
1843 IWLAGN_BT_FLAG_COEX_MODE_SHIFT; 1831 IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
1832 if (priv->cfg->bt_params &&
1833 priv->cfg->bt_params->bt_sco_disable)
1834 bt_cmd.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
1835
1844 if (priv->bt_ch_announce) 1836 if (priv->bt_ch_announce)
1845 bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION; 1837 bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
1846 IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags); 1838 IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags);
@@ -1884,12 +1876,20 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1884 struct iwl_rxon_context *ctx; 1876 struct iwl_rxon_context *ctx;
1885 int smps_request = -1; 1877 int smps_request = -1;
1886 1878
1879 /*
1880 * Note: bt_traffic_load can be overridden by scan complete and
1881 * coex profile notifications. Ignore that since only bad consequence
1882 * can be not matching debug print with actual state.
1883 */
1887 IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n", 1884 IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
1888 priv->bt_traffic_load); 1885 priv->bt_traffic_load);
1889 1886
1890 switch (priv->bt_traffic_load) { 1887 switch (priv->bt_traffic_load) {
1891 case IWL_BT_COEX_TRAFFIC_LOAD_NONE: 1888 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1892 smps_request = IEEE80211_SMPS_AUTOMATIC; 1889 if (priv->bt_status)
1890 smps_request = IEEE80211_SMPS_DYNAMIC;
1891 else
1892 smps_request = IEEE80211_SMPS_AUTOMATIC;
1893 break; 1893 break;
1894 case IWL_BT_COEX_TRAFFIC_LOAD_LOW: 1894 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1895 smps_request = IEEE80211_SMPS_DYNAMIC; 1895 smps_request = IEEE80211_SMPS_DYNAMIC;
@@ -1906,6 +1906,16 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1906 1906
1907 mutex_lock(&priv->mutex); 1907 mutex_lock(&priv->mutex);
1908 1908
1909 /*
1910 * We cannot send commands to the firmware while scanning; when the
1911 * scan completes, this work is scheduled again. The check is made with
1912 * the mutex held so that no new scan request can arrive meanwhile. We
1913 * do not check STATUS_SCANNING, to avoid a race where queue_work runs
1914 * twice from different notifications but the work then does nothing.
1915 */
1916 if (test_bit(STATUS_SCAN_HW, &priv->status))
1917 goto out;
1918
1909 if (priv->cfg->ops->lib->update_chain_flags) 1919 if (priv->cfg->ops->lib->update_chain_flags)
1910 priv->cfg->ops->lib->update_chain_flags(priv); 1920 priv->cfg->ops->lib->update_chain_flags(priv);
1911 1921
@@ -1915,7 +1925,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1915 ieee80211_request_smps(ctx->vif, smps_request); 1925 ieee80211_request_smps(ctx->vif, smps_request);
1916 } 1926 }
1917 } 1927 }
1918 1928out:
1919 mutex_unlock(&priv->mutex); 1929 mutex_unlock(&priv->mutex);
1920} 1930}
1921 1931
@@ -1990,7 +2000,7 @@ static void iwlagn_set_kill_ack_msk(struct iwl_priv *priv,
1990 struct iwl_bt_uart_msg *uart_msg) 2000 struct iwl_bt_uart_msg *uart_msg)
1991{ 2001{
1992 u8 kill_ack_msk; 2002 u8 kill_ack_msk;
1993 __le32 bt_kill_ack_msg[2] = { 2003 static const __le32 bt_kill_ack_msg[2] = {
1994 cpu_to_le32(0xFFFFFFF), cpu_to_le32(0xFFFFFC00) }; 2004 cpu_to_le32(0xFFFFFFF), cpu_to_le32(0xFFFFFC00) };
1995 2005
1996 kill_ack_msk = (((BT_UART_MSG_FRAME3A2DP_MSK | 2006 kill_ack_msk = (((BT_UART_MSG_FRAME3A2DP_MSK |
@@ -2014,7 +2024,6 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
2014 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif; 2024 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
2015 struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 }; 2025 struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
2016 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg; 2026 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
2017 u8 last_traffic_load;
2018 2027
2019 IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n"); 2028 IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
2020 IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status); 2029 IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
@@ -2023,11 +2032,10 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
2023 coex->bt_ci_compliance); 2032 coex->bt_ci_compliance);
2024 iwlagn_print_uartmsg(priv, uart_msg); 2033 iwlagn_print_uartmsg(priv, uart_msg);
2025 2034
2026 last_traffic_load = priv->notif_bt_traffic_load; 2035 priv->last_bt_traffic_load = priv->bt_traffic_load;
2027 priv->notif_bt_traffic_load = coex->bt_traffic_load;
2028 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) { 2036 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
2029 if (priv->bt_status != coex->bt_status || 2037 if (priv->bt_status != coex->bt_status ||
2030 last_traffic_load != coex->bt_traffic_load) { 2038 priv->last_bt_traffic_load != coex->bt_traffic_load) {
2031 if (coex->bt_status) { 2039 if (coex->bt_status) {
2032 /* BT on */ 2040 /* BT on */
2033 if (!priv->bt_ch_announce) 2041 if (!priv->bt_ch_announce)
@@ -2276,7 +2284,7 @@ static const char *get_csr_string(int cmd)
2276void iwl_dump_csr(struct iwl_priv *priv) 2284void iwl_dump_csr(struct iwl_priv *priv)
2277{ 2285{
2278 int i; 2286 int i;
2279 u32 csr_tbl[] = { 2287 static const u32 csr_tbl[] = {
2280 CSR_HW_IF_CONFIG_REG, 2288 CSR_HW_IF_CONFIG_REG,
2281 CSR_INT_COALESCING, 2289 CSR_INT_COALESCING,
2282 CSR_INT, 2290 CSR_INT,
@@ -2335,7 +2343,7 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2335 int pos = 0; 2343 int pos = 0;
2336 size_t bufsz = 0; 2344 size_t bufsz = 0;
2337#endif 2345#endif
2338 u32 fh_tbl[] = { 2346 static const u32 fh_tbl[] = {
2339 FH_RSCSR_CHNL0_STTS_WPTR_REG, 2347 FH_RSCSR_CHNL0_STTS_WPTR_REG,
2340 FH_RSCSR_CHNL0_RBDCB_BASE_REG, 2348 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
2341 FH_RSCSR_CHNL0_WPTR, 2349 FH_RSCSR_CHNL0_WPTR,
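Among the iwl-agn-lib.c changes above, the BT traffic-change work now requests dynamic SMPS when BT is enabled but reporting no traffic (and automatic SMPS otherwise), and bails out while a hardware scan is in flight. A small sketch of that load-to-SMPS mapping, limited to the two cases visible in the hunk; the other traffic loads are treated here as "leave unchanged", which is an assumption rather than the driver's behaviour:

#include <stdbool.h>
#include <stdio.h>

enum bt_load { BT_LOAD_NONE, BT_LOAD_LOW, BT_LOAD_HIGH, BT_LOAD_CONTINUOUS };
enum smps    { SMPS_UNCHANGED = -1, SMPS_AUTOMATIC, SMPS_DYNAMIC };

static enum smps smps_for_bt_load(enum bt_load load, bool bt_on)
{
	switch (load) {
	case BT_LOAD_NONE:
		/* BT merely enabled but idle: prefer dynamic SMPS. */
		return bt_on ? SMPS_DYNAMIC : SMPS_AUTOMATIC;
	case BT_LOAD_LOW:
		return SMPS_DYNAMIC;
	default:
		return SMPS_UNCHANGED;	/* not covered by this sketch */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       smps_for_bt_load(BT_LOAD_NONE, false),
	       smps_for_bt_load(BT_LOAD_NONE, true),
	       smps_for_bt_load(BT_LOAD_LOW, true));
	return 0;
}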
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 065553629de..f450adc7236 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -833,17 +833,23 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
833 struct iwl_lq_sta *lq_sta) 833 struct iwl_lq_sta *lq_sta)
834{ 834{
835 struct iwl_scale_tbl_info *tbl; 835 struct iwl_scale_tbl_info *tbl;
836 bool full_concurrent; 836 bool full_concurrent = priv->bt_full_concurrent;
837 unsigned long flags; 837 unsigned long flags;
838 838
839 spin_lock_irqsave(&priv->lock, flags); 839 if (priv->bt_ant_couple_ok) {
840 if (priv->bt_ci_compliance && priv->bt_ant_couple_ok) 840 /*
841 full_concurrent = true; 841 * Is there a need to switch between
842 else 842 * full concurrency and 3-wire?
843 full_concurrent = false; 843 */
844 spin_unlock_irqrestore(&priv->lock, flags); 844 spin_lock_irqsave(&priv->lock, flags);
845 845 if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
846 if (priv->bt_full_concurrent != full_concurrent) { 846 full_concurrent = true;
847 else
848 full_concurrent = false;
849 spin_unlock_irqrestore(&priv->lock, flags);
850 }
851 if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
852 (priv->bt_full_concurrent != full_concurrent)) {
847 priv->bt_full_concurrent = full_concurrent; 853 priv->bt_full_concurrent = full_concurrent;
848 854
849 /* Update uCode's rate table. */ 855 /* Update uCode's rate table. */
@@ -1040,8 +1046,7 @@ done:
1040 if (sta && sta->supp_rates[sband->band]) 1046 if (sta && sta->supp_rates[sband->band])
1041 rs_rate_scale_perform(priv, skb, sta, lq_sta); 1047 rs_rate_scale_perform(priv, skb, sta, lq_sta);
1042 1048
1043 /* Is there a need to switch between full concurrency and 3-wire? */ 1049 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
1044 if (priv->bt_ant_couple_ok)
1045 rs_bt_update_lq(priv, ctx, lq_sta); 1050 rs_bt_update_lq(priv, ctx, lq_sta);
1046} 1051}
1047 1052
@@ -3010,10 +3015,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3010 */ 3015 */
3011 if (priv && priv->cfg->bt_params && 3016 if (priv && priv->cfg->bt_params &&
3012 priv->cfg->bt_params->agg_time_limit && 3017 priv->cfg->bt_params->agg_time_limit &&
3013 priv->cfg->bt_params->agg_time_limit >= 3018 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
3014 LINK_QUAL_AGG_TIME_LIMIT_MIN &&
3015 priv->cfg->bt_params->agg_time_limit <=
3016 LINK_QUAL_AGG_TIME_LIMIT_MAX)
3017 lq_cmd->agg_params.agg_time_limit = 3019 lq_cmd->agg_params.agg_time_limit =
3018 cpu_to_le16(priv->cfg->bt_params->agg_time_limit); 3020 cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
3019} 3021}
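With the iwl-agn-rs.c changes above, rs_bt_update_lq() starts from the current full-concurrency state, re-evaluates the 1x1-versus-3-wire question only when the antenna coupling allows it, and rewrites the uCode rate table when either that mode or the reported BT traffic load has changed; rs_fill_link_cmd() now applies the BT aggregation time limit whenever the traffic load is high instead of range-checking the configured value. A simplified sketch of the "do we need to reprogram the link quality table?" decision, with field names that are not the driver's real ones:

#include <stdbool.h>
#include <stdio.h>

struct bt_state {
	bool ant_couple_ok;	/* antennas loosely coupled: 1x1 is possible  */
	bool ci_compliance;	/* channel-inhibition compliance from uCode   */
	bool full_concurrent;	/* currently running WiFi/BT fully in parallel */
	int  traffic_load;
	int  last_traffic_load;
};

static bool bt_lq_update_needed(struct bt_state *bt)
{
	bool full = bt->full_concurrent;

	if (bt->ant_couple_ok)
		full = bt->ci_compliance;	/* re-evaluate 1x1 vs 3-wire */

	if (bt->traffic_load != bt->last_traffic_load ||
	    bt->full_concurrent != full) {
		bt->full_concurrent = full;
		return true;	/* caller should rewrite the rate table */
	}
	return false;
}

int main(void)
{
	struct bt_state bt = {
		.ant_couple_ok = true, .ci_compliance = true,
		.full_concurrent = false,
		.traffic_load = 0, .last_traffic_load = 0,
	};

	printf("update needed: %d\n", bt_lq_update_needed(&bt));
	return 0;
}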
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
new file mode 100644
index 00000000000..203ee60a82b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -0,0 +1,632 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include "iwl-dev.h"
28#include "iwl-agn.h"
29#include "iwl-sta.h"
30#include "iwl-core.h"
31#include "iwl-agn-calib.h"
32
33static int iwlagn_disable_bss(struct iwl_priv *priv,
34 struct iwl_rxon_context *ctx,
35 struct iwl_rxon_cmd *send)
36{
37 __le32 old_filter = send->filter_flags;
38 int ret;
39
40 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
41 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
42
43 send->filter_flags = old_filter;
44
45 if (ret)
46 IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
47
48 return ret;
49}
50
51static int iwlagn_disable_pan(struct iwl_priv *priv,
52 struct iwl_rxon_context *ctx,
53 struct iwl_rxon_cmd *send)
54{
55 __le32 old_filter = send->filter_flags;
56 u8 old_dev_type = send->dev_type;
57 int ret;
58
59 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
60 send->dev_type = RXON_DEV_TYPE_P2P;
61 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
62
63 send->filter_flags = old_filter;
64 send->dev_type = old_dev_type;
65
66 if (ret)
67 IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
68
69 /* FIXME: WAIT FOR PAN DISABLE */
70 msleep(300);
71
72 return ret;
73}
74
75static void iwlagn_update_qos(struct iwl_priv *priv,
76 struct iwl_rxon_context *ctx)
77{
78 int ret;
79
80 if (!ctx->is_active)
81 return;
82
83 ctx->qos_data.def_qos_parm.qos_flags = 0;
84
85 if (ctx->qos_data.qos_active)
86 ctx->qos_data.def_qos_parm.qos_flags |=
87 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
88
89 if (ctx->ht.enabled)
90 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
91
92 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
93 ctx->qos_data.qos_active,
94 ctx->qos_data.def_qos_parm.qos_flags);
95
96 ret = iwl_send_cmd_pdu(priv, ctx->qos_cmd,
97 sizeof(struct iwl_qosparam_cmd),
98 &ctx->qos_data.def_qos_parm);
99 if (ret)
100 IWL_ERR(priv, "Failed to update QoS\n");
101}
102
103static int iwlagn_update_beacon(struct iwl_priv *priv,
104 struct ieee80211_vif *vif)
105{
106 lockdep_assert_held(&priv->mutex);
107
108 dev_kfree_skb(priv->beacon_skb);
109 priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
110 if (!priv->beacon_skb)
111 return -ENOMEM;
112 return iwlagn_send_beacon_cmd(priv);
113}
114
115/**
116 * iwlagn_commit_rxon - commit staging_rxon to hardware
117 *
118 * The RXON command in staging_rxon is committed to the hardware and
119 * the active_rxon structure is updated with the new data. This
120 * function correctly transitions out of the RXON_ASSOC_MSK state if
121 * a HW tune is required based on the RXON structure changes.
122 */
123int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
124{
125 /* cast away the const for active_rxon in this function */
126 struct iwl_rxon_cmd *active = (void *)&ctx->active;
127 bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
128 bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
129 int ret;
130
131 lockdep_assert_held(&priv->mutex);
132
133 if (!iwl_is_alive(priv))
134 return -EBUSY;
135
136 /* This function hardcodes a bunch of dual-mode assumptions */
137 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
138
139 if (!ctx->is_active)
140 return 0;
141
142 /* always get timestamp with Rx frame */
143 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
144
145 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
146 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
147 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
148 else
149 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
150
151 ret = iwl_check_rxon_cmd(priv, ctx);
152 if (ret) {
153 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
154 return -EINVAL;
155 }
156
157 /*
158 * A new commit_rxon request arrived: abort any previous channel
159 * switch that is still in progress.
160 */
161 if (priv->switch_rxon.switch_in_progress &&
162 (priv->switch_rxon.channel != ctx->staging.channel)) {
163 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
164 le16_to_cpu(priv->switch_rxon.channel));
165 iwl_chswitch_done(priv, false);
166 }
167
168 /*
169 * If we don't need to send a full RXON, we can use
170 * iwl_rxon_assoc_cmd which is used to reconfigure filter
171 * and other flags for the current radio configuration.
172 */
173 if (!iwl_full_rxon_required(priv, ctx)) {
174 ret = iwl_send_rxon_assoc(priv, ctx);
175 if (ret) {
176 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
177 return ret;
178 }
179
180 memcpy(active, &ctx->staging, sizeof(*active));
181 iwl_print_rx_config_cmd(priv, ctx);
182 return 0;
183 }
184
185 if (priv->cfg->ops->hcmd->set_pan_params) {
186 ret = priv->cfg->ops->hcmd->set_pan_params(priv);
187 if (ret)
188 return ret;
189 }
190
191 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
192
193 IWL_DEBUG_INFO(priv,
194 "Going to commit RXON\n"
195 " * with%s RXON_FILTER_ASSOC_MSK\n"
196 " * channel = %d\n"
197 " * bssid = %pM\n",
198 (new_assoc ? "" : "out"),
199 le16_to_cpu(ctx->staging.channel),
200 ctx->staging.bssid_addr);
201
202 /*
203 * Always clear associated first, but with the correct config.
204 * This is required as for example station addition for the
205 * AP station must be done after the BSSID is set to correctly
206 * set up filters in the device.
207 */
208 if ((old_assoc && new_assoc) || !new_assoc) {
209 if (ctx->ctxid == IWL_RXON_CTX_BSS)
210 ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
211 else
212 ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
213 if (ret)
214 return ret;
215
216 memcpy(active, &ctx->staging, sizeof(*active));
217
218 /*
219 * Un-assoc RXON clears the station table and WEP
220 * keys, so we have to restore those afterwards.
221 */
222 iwl_clear_ucode_stations(priv, ctx);
223 iwl_restore_stations(priv, ctx);
224 ret = iwl_restore_default_wep_keys(priv, ctx);
225 if (ret) {
226 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
227 return ret;
228 }
229 }
230
231 /* RXON timing must be before associated RXON */
232 ret = iwl_send_rxon_timing(priv, ctx);
233 if (ret) {
234 IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
235 return ret;
236 }
237
238 if (new_assoc) {
239 /* QoS info may be cleared by previous un-assoc RXON */
240 iwlagn_update_qos(priv, ctx);
241
242 /*
243 * We'll run into this code path when beaconing is
244 * enabled, but then we also need to send the beacon
245 * to the device.
246 */
247 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
248 ret = iwlagn_update_beacon(priv, ctx->vif);
249 if (ret) {
250 IWL_ERR(priv,
251 "Error sending required beacon (%d)!\n",
252 ret);
253 return ret;
254 }
255 }
256
257 priv->start_calib = 0;
258 /*
259 * Apply the new configuration.
260 *
261 * Associated RXON doesn't clear the station table in uCode,
262 * so we don't need to restore stations etc. after this.
263 */
264 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
265 sizeof(struct iwl_rxon_cmd), &ctx->staging);
266 if (ret) {
267 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
268 return ret;
269 }
270 memcpy(active, &ctx->staging, sizeof(*active));
271
272 iwl_reprogram_ap_sta(priv, ctx);
273
274 /* IBSS beacon needs to be sent after setting assoc */
275 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
276 if (iwlagn_update_beacon(priv, ctx->vif))
277 IWL_ERR(priv, "Error sending IBSS beacon\n");
278 }
279
280 iwl_print_rx_config_cmd(priv, ctx);
281
282 iwl_init_sensitivity(priv);
283
284 /*
285 * If we issue a new RXON command which required a tune then we must
286 * send a new TXPOWER command or we won't be able to Tx any frames.
287 *
288 * FIXME: which RXON requires a tune? Can we optimise this out in
289 * some cases?
290 */
291 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
292 if (ret) {
293 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
294 return ret;
295 }
296
297 return 0;
298}
299
300int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
301{
302 struct iwl_priv *priv = hw->priv;
303 struct iwl_rxon_context *ctx;
304 struct ieee80211_conf *conf = &hw->conf;
305 struct ieee80211_channel *channel = conf->channel;
306 const struct iwl_channel_info *ch_info;
307 int ret = 0;
308 bool ht_changed[NUM_IWL_RXON_CTX] = {};
309
310 IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
311
312 mutex_lock(&priv->mutex);
313
314 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
315 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
316 goto out;
317 }
318
319 if (!iwl_is_ready(priv)) {
320 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
321 goto out;
322 }
323
324 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
325 IEEE80211_CONF_CHANGE_CHANNEL)) {
326 /* mac80211 uses static for non-HT which is what we want */
327 priv->current_ht_config.smps = conf->smps_mode;
328
329 /*
330 * Recalculate chain counts.
331 *
332 * If monitor mode is enabled then mac80211 will
333 * set up the SM PS mode to OFF if an HT channel is
334 * configured.
335 */
336 if (priv->cfg->ops->hcmd->set_rxon_chain)
337 for_each_context(priv, ctx)
338 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
339 }
340
341 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
342 unsigned long flags;
343
344 ch_info = iwl_get_channel_info(priv, channel->band,
345 channel->hw_value);
346 if (!is_channel_valid(ch_info)) {
347 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
348 ret = -EINVAL;
349 goto out;
350 }
351
352 spin_lock_irqsave(&priv->lock, flags);
353
354 for_each_context(priv, ctx) {
355 /* Configure HT40 channels */
356 if (ctx->ht.enabled != conf_is_ht(conf)) {
357 ctx->ht.enabled = conf_is_ht(conf);
358 ht_changed[ctx->ctxid] = true;
359 }
360
361 if (ctx->ht.enabled) {
362 if (conf_is_ht40_minus(conf)) {
363 ctx->ht.extension_chan_offset =
364 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
365 ctx->ht.is_40mhz = true;
366 } else if (conf_is_ht40_plus(conf)) {
367 ctx->ht.extension_chan_offset =
368 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
369 ctx->ht.is_40mhz = true;
370 } else {
371 ctx->ht.extension_chan_offset =
372 IEEE80211_HT_PARAM_CHA_SEC_NONE;
373 ctx->ht.is_40mhz = false;
374 }
375 } else
376 ctx->ht.is_40mhz = false;
377
378 /*
379 * Default to no protection. Protection mode will
380 * later be set from BSS config in iwl_ht_conf
381 */
382 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
383
384 /* if we are switching from ht to 2.4 clear flags
385 * from any ht related info since 2.4 does not
386 * support ht */
387 if (le16_to_cpu(ctx->staging.channel) !=
388 channel->hw_value)
389 ctx->staging.flags = 0;
390
391 iwl_set_rxon_channel(priv, channel, ctx);
392 iwl_set_rxon_ht(priv, &priv->current_ht_config);
393
394 iwl_set_flags_for_band(priv, ctx, channel->band,
395 ctx->vif);
396 }
397
398 spin_unlock_irqrestore(&priv->lock, flags);
399
400 iwl_update_bcast_stations(priv);
401
402 /*
403 * The list of supported rates and rate mask can be different
404 * for each band; since the band may have changed, reset
405 * the rate mask to what mac80211 lists.
406 */
407 iwl_set_rate(priv);
408 }
409
410 if (changed & (IEEE80211_CONF_CHANGE_PS |
411 IEEE80211_CONF_CHANGE_IDLE)) {
412 ret = iwl_power_update_mode(priv, false);
413 if (ret)
414 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
415 }
416
417 if (changed & IEEE80211_CONF_CHANGE_POWER) {
418 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
419 priv->tx_power_user_lmt, conf->power_level);
420
421 iwl_set_tx_power(priv, conf->power_level, false);
422 }
423
424 for_each_context(priv, ctx) {
425 if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
426 continue;
427 iwlagn_commit_rxon(priv, ctx);
428 if (ht_changed[ctx->ctxid])
429 iwlagn_update_qos(priv, ctx);
430 }
431 out:
432 mutex_unlock(&priv->mutex);
433 return ret;
434}
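
/*
 * Editor's sketch, not part of the patch: a minimal standalone restatement
 * of the HT40 mapping performed in iwlagn_mac_config() above, so the three
 * cases (HT40+, HT40-, 20 MHz/non-HT) are easy to see at a glance.  The
 * EX_* constants mirror the secondary-channel-offset values from
 * include/linux/ieee80211.h; the helper name is hypothetical.
 */
enum {
	EX_HT_PARAM_CHA_SEC_NONE  = 0x0,	/* no secondary channel     */
	EX_HT_PARAM_CHA_SEC_ABOVE = 0x1,	/* secondary above (HT40+)  */
	EX_HT_PARAM_CHA_SEC_BELOW = 0x3,	/* secondary below (HT40-)  */
};

static int example_ht40_offset(int ht_enabled, int ht40_plus, int ht40_minus,
			       int *is_40mhz)
{
	if (!ht_enabled || (!ht40_plus && !ht40_minus)) {
		*is_40mhz = 0;		/* 20 MHz (or legacy) operation */
		return EX_HT_PARAM_CHA_SEC_NONE;
	}

	*is_40mhz = 1;
	return ht40_minus ? EX_HT_PARAM_CHA_SEC_BELOW
			  : EX_HT_PARAM_CHA_SEC_ABOVE;
}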
435
436static void iwlagn_check_needed_chains(struct iwl_priv *priv,
437 struct iwl_rxon_context *ctx,
438 struct ieee80211_bss_conf *bss_conf)
439{
440 struct ieee80211_vif *vif = ctx->vif;
441 struct iwl_rxon_context *tmp;
442 struct ieee80211_sta *sta;
443 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
444 bool need_multiple;
445
446 lockdep_assert_held(&priv->mutex);
447
448 switch (vif->type) {
449 case NL80211_IFTYPE_STATION:
450 rcu_read_lock();
451 sta = ieee80211_find_sta(vif, bss_conf->bssid);
452 if (sta) {
453 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
454 int maxstreams;
455
456 maxstreams = (ht_cap->mcs.tx_params &
457 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
458 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
459 maxstreams += 1;
460
461 need_multiple = true;
462
463 if ((ht_cap->mcs.rx_mask[1] == 0) &&
464 (ht_cap->mcs.rx_mask[2] == 0))
465 need_multiple = false;
466 if (maxstreams <= 1)
467 need_multiple = false;
468 } else {
469 /*
470			 * This can only happen through a race when the AP
471			 * disconnects us while we're still setting up the
472			 * connection; in that case mac80211 will soon tell
473			 * us about it.
474 */
475 need_multiple = false;
476 }
477 rcu_read_unlock();
478 break;
479 case NL80211_IFTYPE_ADHOC:
480 /* currently */
481 need_multiple = false;
482 break;
483 default:
484 /* only AP really */
485 need_multiple = true;
486 break;
487 }
488
489 ctx->ht_need_multiple_chains = need_multiple;
490
491 if (!need_multiple) {
492 /* check all contexts */
493 for_each_context(priv, tmp) {
494 if (!tmp->vif)
495 continue;
496 if (tmp->ht_need_multiple_chains) {
497 need_multiple = true;
498 break;
499 }
500 }
501 }
502
503 ht_conf->single_chain_sufficient = !need_multiple;
504}
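
/*
 * Editor's sketch, not part of the patch: the chain decision made in
 * iwlagn_check_needed_chains() above, reduced to its inputs.  The peer
 * needs more than one chain only if it can receive multi-stream MCS
 * (rx_mask[1] or rx_mask[2] non-zero) and advertises more than one TX
 * stream (the 2-bit field in tx_params encodes "streams - 1").  The EX_*
 * mask/shift mirror include/linux/ieee80211.h; the name is hypothetical.
 */
#define EX_HT_MCS_TX_MAX_STREAMS_MASK	0x0C
#define EX_HT_MCS_TX_MAX_STREAMS_SHIFT	2

static int example_need_multiple_chains(unsigned char tx_params,
					const unsigned char rx_mask[10])
{
	int maxstreams = ((tx_params & EX_HT_MCS_TX_MAX_STREAMS_MASK)
			  >> EX_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;

	if (rx_mask[1] == 0 && rx_mask[2] == 0)
		return 0;	/* peer receives single-stream MCS only */
	if (maxstreams <= 1)
		return 0;	/* peer transmits a single stream only  */
	return 1;
}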
505
506void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
507 struct ieee80211_vif *vif,
508 struct ieee80211_bss_conf *bss_conf,
509 u32 changes)
510{
511 struct iwl_priv *priv = hw->priv;
512 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
513 int ret;
514 bool force = false;
515
516 mutex_lock(&priv->mutex);
517
518 if (WARN_ON(!ctx->vif)) {
519 mutex_unlock(&priv->mutex);
520 return;
521 }
522
523 if (changes & BSS_CHANGED_BEACON_INT)
524 force = true;
525
526 if (changes & BSS_CHANGED_QOS) {
527 ctx->qos_data.qos_active = bss_conf->qos;
528 iwlagn_update_qos(priv, ctx);
529 }
530
531 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
532 if (vif->bss_conf.use_short_preamble)
533 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
534 else
535 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
536
537 if (changes & BSS_CHANGED_ASSOC) {
538 if (bss_conf->assoc) {
539 iwl_led_associate(priv);
540 priv->timestamp = bss_conf->timestamp;
541 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
542 } else {
543 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
544 iwl_led_disassociate(priv);
545 }
546 }
547
548 if (ctx->ht.enabled) {
549 ctx->ht.protection = bss_conf->ht_operation_mode &
550 IEEE80211_HT_OP_MODE_PROTECTION;
551 ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
552 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
553 iwlagn_check_needed_chains(priv, ctx, bss_conf);
554 iwl_set_rxon_ht(priv, &priv->current_ht_config);
555 }
556
557 if (priv->cfg->ops->hcmd->set_rxon_chain)
558 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
559
560 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
561 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
562 else
563 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
564
565 if (bss_conf->use_cts_prot)
566 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
567 else
568 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
569
570 memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
571
572 if (vif->type == NL80211_IFTYPE_AP ||
573 vif->type == NL80211_IFTYPE_ADHOC) {
574 if (vif->bss_conf.enable_beacon) {
575 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
576 priv->beacon_ctx = ctx;
577 } else {
578 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
579 priv->beacon_ctx = NULL;
580 }
581 }
582
583 if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
584 iwlagn_commit_rxon(priv, ctx);
585
586 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
587 /*
588 * The chain noise calibration will enable PM upon
589 * completion. If calibration has already been run
590 * then we need to enable power management here.
591 */
592 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
593 iwl_power_update_mode(priv, false);
594
595 /* Enable RX differential gain and sensitivity calibrations */
596 iwl_chain_noise_reset(priv);
597 priv->start_calib = 1;
598 }
599
600 if (changes & BSS_CHANGED_IBSS) {
601 ret = iwlagn_manage_ibss_station(priv, vif,
602 bss_conf->ibss_joined);
603 if (ret)
604 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
605 bss_conf->ibss_joined ? "add" : "remove",
606 bss_conf->bssid);
607 }
608
609 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
610 priv->beacon_ctx) {
611 if (iwlagn_update_beacon(priv, vif))
612 IWL_ERR(priv, "Error sending IBSS beacon\n");
613 }
614
615 mutex_unlock(&priv->mutex);
616}
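
/*
 * Editor's sketch, not part of the patch: it restates the two CTS-related
 * flag updates in iwlagn_bss_info_changed() above.  11g protection is only
 * set on the 2.4 GHz band, while CTS-to-self follows the BSS setting
 * directly.  The EX_FLG_* bit values and the helper name are hypothetical
 * stand-ins for the RXON_FLG_* definitions.
 */
#define EX_FLG_TGG_PROTECT	(1u << 0)
#define EX_FLG_SELF_CTS_EN	(1u << 1)

static unsigned int example_cts_flags(unsigned int flags, int use_cts_prot,
				      int band_is_5ghz)
{
	if (use_cts_prot && !band_is_5ghz)
		flags |= EX_FLG_TGG_PROTECT;	/* protect 11g against 11b */
	else
		flags &= ~EX_FLG_TGG_PROTECT;

	if (use_cts_prot)
		flags |= EX_FLG_SELF_CTS_EN;	/* allow CTS-to-self */
	else
		flags &= ~EX_FLG_SELF_CTS_EN;

	return flags;
}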
617
618void iwlagn_post_scan(struct iwl_priv *priv)
619{
620 struct iwl_rxon_context *ctx;
621
622 /*
623 * Since setting the RXON may have been deferred while
624 * performing the scan, fire one off if needed
625 */
626 for_each_context(priv, ctx)
627 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
628 iwlagn_commit_rxon(priv, ctx);
629
630 if (priv->cfg->ops->hcmd->set_pan_params)
631 priv->cfg->ops->hcmd->set_pan_params(priv);
632}
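
/*
 * Editor's sketch, not part of the patch: iwlagn_mac_config(),
 * iwlagn_bss_info_changed() and iwlagn_post_scan() above all follow the
 * same pattern -- mutate ctx->staging freely, then push it to the device
 * only when it actually differs from ctx->active.  A minimal model of that
 * pattern, with hypothetical example_* names:
 */
#include <string.h>

struct example_rxon { int channel; int flags; };

struct example_ctx {
	struct example_rxon staging;	/* what we want             */
	struct example_rxon active;	/* what the device now has  */
};

static void example_commit(struct example_ctx *ctx,
			   void (*send_rxon)(const struct example_rxon *))
{
	if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)) == 0)
		return;			/* nothing changed, skip the command */
	send_rxon(&ctx->staging);
	ctx->active = ctx->staging;	/* device is now in sync */
}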
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 35a30d2e073..35f085ac336 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -684,7 +684,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
684 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 684 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
685} 685}
686 686
687void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) 687static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
688{ 688{
689 unsigned long flags; 689 unsigned long flags;
690 690
@@ -714,3 +714,33 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
714 spin_unlock_irqrestore(&priv->sta_lock, flags); 714 spin_unlock_irqrestore(&priv->sta_lock, flags);
715 715
716} 716}
717
718void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
719 struct ieee80211_vif *vif,
720 enum sta_notify_cmd cmd,
721 struct ieee80211_sta *sta)
722{
723 struct iwl_priv *priv = hw->priv;
724 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
725 int sta_id;
726
727 switch (cmd) {
728 case STA_NOTIFY_SLEEP:
729 WARN_ON(!sta_priv->client);
730 sta_priv->asleep = true;
731 if (atomic_read(&sta_priv->pending_frames) > 0)
732 ieee80211_sta_block_awake(hw, sta, true);
733 break;
734 case STA_NOTIFY_AWAKE:
735 WARN_ON(!sta_priv->client);
736 if (!sta_priv->asleep)
737 break;
738 sta_priv->asleep = false;
739 sta_id = iwl_sta_id(sta);
740 if (sta_id != IWL_INVALID_STATION)
741 iwl_sta_modify_ps_wake(priv, sta_id);
742 break;
743 default:
744 break;
745 }
746}
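
/*
 * Editor's sketch, not part of the patch: the handler added above is a
 * small two-state machine per station.  SLEEP marks the station asleep
 * and, if frames are still queued for it, asks mac80211 to keep it
 * blocked; AWAKE clears the flag and tells the firmware to resume
 * transmission.  A minimal model with hypothetical example_* names:
 */
struct example_sta_state {
	int asleep;
	int pending_frames;
};

enum example_notify { EXAMPLE_NOTIFY_SLEEP, EXAMPLE_NOTIFY_AWAKE };

static void example_sta_notify(struct example_sta_state *s,
			       enum example_notify cmd,
			       void (*block)(int block_it),
			       void (*fw_wake)(void))
{
	switch (cmd) {
	case EXAMPLE_NOTIFY_SLEEP:
		s->asleep = 1;
		if (s->pending_frames > 0)
			block(1);	/* keep mac80211 from unblocking too early */
		break;
	case EXAMPLE_NOTIFY_AWAKE:
		if (!s->asleep)
			break;		/* spurious wake, nothing to do */
		s->asleep = 0;
		fw_wake();		/* let the firmware transmit again */
		break;
	}
}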
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 2b078a99572..07bbc915529 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -518,11 +518,11 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
518 struct iwl_cmd_meta *out_meta; 518 struct iwl_cmd_meta *out_meta;
519 struct iwl_tx_cmd *tx_cmd; 519 struct iwl_tx_cmd *tx_cmd;
520 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 520 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
521 int swq_id, txq_id; 521 int txq_id;
522 dma_addr_t phys_addr; 522 dma_addr_t phys_addr;
523 dma_addr_t txcmd_phys; 523 dma_addr_t txcmd_phys;
524 dma_addr_t scratch_phys; 524 dma_addr_t scratch_phys;
525 u16 len, len_org, firstlen, secondlen; 525 u16 len, firstlen, secondlen;
526 u16 seq_number = 0; 526 u16 seq_number = 0;
527 __le16 fc; 527 __le16 fc;
528 u8 hdr_len; 528 u8 hdr_len;
@@ -620,7 +620,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
620 } 620 }
621 621
622 txq = &priv->txq[txq_id]; 622 txq = &priv->txq[txq_id];
623 swq_id = txq->swq_id;
624 q = &txq->q; 623 q = &txq->q;
625 624
626 if (unlikely(iwl_queue_space(q) < q->high_mark)) { 625 if (unlikely(iwl_queue_space(q) < q->high_mark)) {
@@ -687,30 +686,23 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
687 */ 686 */
688 len = sizeof(struct iwl_tx_cmd) + 687 len = sizeof(struct iwl_tx_cmd) +
689 sizeof(struct iwl_cmd_header) + hdr_len; 688 sizeof(struct iwl_cmd_header) + hdr_len;
690 689 firstlen = (len + 3) & ~3;
691 len_org = len;
692 firstlen = len = (len + 3) & ~3;
693
694 if (len_org != len)
695 len_org = 1;
696 else
697 len_org = 0;
698 690
699 /* Tell NIC about any 2-byte padding after MAC header */ 691 /* Tell NIC about any 2-byte padding after MAC header */
700 if (len_org) 692 if (firstlen != len)
701 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; 693 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
702 694
703 /* Physical address of this Tx command's header (not MAC header!), 695 /* Physical address of this Tx command's header (not MAC header!),
704 * within command buffer array. */ 696 * within command buffer array. */
705 txcmd_phys = pci_map_single(priv->pci_dev, 697 txcmd_phys = pci_map_single(priv->pci_dev,
706 &out_cmd->hdr, len, 698 &out_cmd->hdr, firstlen,
707 PCI_DMA_BIDIRECTIONAL); 699 PCI_DMA_BIDIRECTIONAL);
708 dma_unmap_addr_set(out_meta, mapping, txcmd_phys); 700 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
709 dma_unmap_len_set(out_meta, len, len); 701 dma_unmap_len_set(out_meta, len, firstlen);
710 /* Add buffer containing Tx command and MAC(!) header to TFD's 702 /* Add buffer containing Tx command and MAC(!) header to TFD's
711 * first entry */ 703 * first entry */
712 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 704 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
713 txcmd_phys, len, 1, 0); 705 txcmd_phys, firstlen, 1, 0);
714 706
715 if (!ieee80211_has_morefrags(hdr->frame_control)) { 707 if (!ieee80211_has_morefrags(hdr->frame_control)) {
716 txq->need_update = 1; 708 txq->need_update = 1;
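
/*
 * Editor's sketch, not part of the patch: the arithmetic the hunk above
 * switches to.  firstlen is the command+header length rounded up to a
 * 4-byte boundary, and the MH_PAD flag is needed exactly when rounding
 * changed the value.  The helper name is hypothetical.
 */
static unsigned int example_round_up4(unsigned int len, int *padded)
{
	unsigned int firstlen = (len + 3) & ~3u;	/* next multiple of 4 */

	*padded = (firstlen != len);	/* e.g. len = 42 -> firstlen = 44, padded = 1 */
	return firstlen;
}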
@@ -721,23 +713,21 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
721 713
722 /* Set up TFD's 2nd entry to point directly to remainder of skb, 714 /* Set up TFD's 2nd entry to point directly to remainder of skb,
723 * if any (802.11 null frames have no payload). */ 715 * if any (802.11 null frames have no payload). */
724 secondlen = len = skb->len - hdr_len; 716 secondlen = skb->len - hdr_len;
725 if (len) { 717 if (secondlen > 0) {
726 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, 718 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
727 len, PCI_DMA_TODEVICE); 719 secondlen, PCI_DMA_TODEVICE);
728 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 720 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
729 phys_addr, len, 721 phys_addr, secondlen,
730 0, 0); 722 0, 0);
731 } 723 }
732 724
733 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + 725 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
734 offsetof(struct iwl_tx_cmd, scratch); 726 offsetof(struct iwl_tx_cmd, scratch);
735 727
736 len = sizeof(struct iwl_tx_cmd) +
737 sizeof(struct iwl_cmd_header) + hdr_len;
738 /* take back ownership of DMA buffer to enable update */ 728 /* take back ownership of DMA buffer to enable update */
739 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, 729 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
740 len, PCI_DMA_BIDIRECTIONAL); 730 firstlen, PCI_DMA_BIDIRECTIONAL);
741 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 731 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
742 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 732 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
743 733
@@ -753,7 +743,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
753 le16_to_cpu(tx_cmd->len)); 743 le16_to_cpu(tx_cmd->len));
754 744
755 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, 745 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
756 len, PCI_DMA_BIDIRECTIONAL); 746 firstlen, PCI_DMA_BIDIRECTIONAL);
757 747
758 trace_iwlwifi_dev_tx(priv, 748 trace_iwlwifi_dev_tx(priv,
759 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], 749 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
@@ -784,7 +774,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
784 iwl_txq_update_write_ptr(priv, txq); 774 iwl_txq_update_write_ptr(priv, txq);
785 spin_unlock_irqrestore(&priv->lock, flags); 775 spin_unlock_irqrestore(&priv->lock, flags);
786 } else { 776 } else {
787 iwl_stop_queue(priv, txq->swq_id); 777 iwl_stop_queue(priv, txq);
788 } 778 }
789 } 779 }
790 780
@@ -1013,7 +1003,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
1013 tid_data = &priv->stations[sta_id].tid[tid]; 1003 tid_data = &priv->stations[sta_id].tid[tid];
1014 *ssn = SEQ_TO_SN(tid_data->seq_number); 1004 *ssn = SEQ_TO_SN(tid_data->seq_number);
1015 tid_data->agg.txq_id = txq_id; 1005 tid_data->agg.txq_id = txq_id;
1016 priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id); 1006 iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
1017 spin_unlock_irqrestore(&priv->sta_lock, flags); 1007 spin_unlock_irqrestore(&priv->sta_lock, flags);
1018 1008
1019 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, 1009 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
@@ -1241,37 +1231,61 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1241	if (sh < 0) /* tbw something is wrong with indices */	1231	if (sh < 0) /* tbw something is wrong with indices */
1242		sh += 0x100;	1232		sh += 0x100;
1243 1233
1244 /* don't use 64-bit values for now */
1245 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1246
1247 if (agg->frame_count > (64 - sh)) { 1234 if (agg->frame_count > (64 - sh)) {
1248 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); 1235 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1249 return -1; 1236 return -1;
1250 } 1237 }
1251 1238 if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) {
1252 /* check for success or failure according to the 1239 /*
1253 * transmitted bitmap and block-ack bitmap */ 1240 * sent and ack information provided by uCode
1254 sent_bitmap = bitmap & agg->bitmap; 1241 * use it instead of figure out ourself
1255 1242 */
1256 /* For each frame attempted in aggregation, 1243 if (ba_resp->txed_2_done > ba_resp->txed) {
1257 * update driver's record of tx frame's status. */ 1244 IWL_DEBUG_TX_REPLY(priv,
1258 i = 0; 1245 "bogus sent(%d) and ack(%d) count\n",
1259 while (sent_bitmap) { 1246 ba_resp->txed, ba_resp->txed_2_done);
1260 ack = sent_bitmap & 1ULL; 1247 /*
1261 successes += ack; 1248 * set txed_2_done = txed,
1262 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", 1249 * so it won't impact rate scale
1263 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff, 1250 */
1264 agg->start_idx + i); 1251 ba_resp->txed = ba_resp->txed_2_done;
1265 sent_bitmap >>= 1; 1252 }
1266 ++i; 1253 IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
1254 ba_resp->txed, ba_resp->txed_2_done);
1255 } else {
1256 /* don't use 64-bit values for now */
1257 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1258
1259 /* check for success or failure according to the
1260 * transmitted bitmap and block-ack bitmap */
1261 sent_bitmap = bitmap & agg->bitmap;
1262
1263 /* For each frame attempted in aggregation,
1264 * update driver's record of tx frame's status. */
1265 i = 0;
1266 while (sent_bitmap) {
1267 ack = sent_bitmap & 1ULL;
1268 successes += ack;
1269 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1270 ack ? "ACK" : "NACK", i,
1271 (agg->start_idx + i) & 0xff,
1272 agg->start_idx + i);
1273 sent_bitmap >>= 1;
1274 ++i;
1275 }
1267 } 1276 }
1268
1269 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb); 1277 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
1270 memset(&info->status, 0, sizeof(info->status)); 1278 memset(&info->status, 0, sizeof(info->status));
1271 info->flags |= IEEE80211_TX_STAT_ACK; 1279 info->flags |= IEEE80211_TX_STAT_ACK;
1272 info->flags |= IEEE80211_TX_STAT_AMPDU; 1280 info->flags |= IEEE80211_TX_STAT_AMPDU;
1273 info->status.ampdu_ack_len = successes; 1281 if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) {
1274 info->status.ampdu_len = agg->frame_count; 1282 info->status.ampdu_ack_len = ba_resp->txed_2_done;
1283 info->status.ampdu_len = ba_resp->txed;
1284
1285 } else {
1286 info->status.ampdu_ack_len = successes;
1287 info->status.ampdu_len = agg->frame_count;
1288 }
1275 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info); 1289 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1276 1290
1277 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap); 1291 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
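
/*
 * Editor's sketch, not part of the patch: in the fallback branch above,
 * the block-ack bitmap from the firmware is ANDed with the driver's record
 * of which frames were part of the aggregate, and the set bits are then
 * counted to get the number of acked frames.  A compact standalone version
 * of that walk (the example_* name is hypothetical):
 */
static int example_count_acked(unsigned long long ba_bitmap,
			       unsigned long long sent_mask)
{
	unsigned long long sent_bitmap = ba_bitmap & sent_mask;
	int successes = 0;

	while (sent_bitmap) {
		successes += (int)(sent_bitmap & 1ULL);	/* low bit = this frame acked */
		sent_bitmap >>= 1;
	}
	return successes;
}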
@@ -1385,7 +1399,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1385 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && 1399 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1386 priv->mac80211_registered && 1400 priv->mac80211_registered &&
1387 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) 1401 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1388 iwl_wake_queue(priv, txq->swq_id); 1402 iwl_wake_queue(priv, txq);
1389 1403
1390 iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow); 1404 iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
1391 } 1405 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 703621107da..411a7a20450 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -40,30 +40,36 @@
40#include "iwl-agn.h" 40#include "iwl-agn.h"
41#include "iwl-agn-calib.h" 41#include "iwl-agn-calib.h"
42 42
43static const s8 iwlagn_default_queue_to_tx_fifo[] = { 43#define IWL_AC_UNSET -1
44 IWL_TX_FIFO_VO, 44
45 IWL_TX_FIFO_VI, 45struct queue_to_fifo_ac {
46 IWL_TX_FIFO_BE, 46 s8 fifo, ac;
47 IWL_TX_FIFO_BK, 47};
48 IWLAGN_CMD_FIFO_NUM, 48
49 IWL_TX_FIFO_UNUSED, 49static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
50 IWL_TX_FIFO_UNUSED, 50 { IWL_TX_FIFO_VO, 0, },
51 IWL_TX_FIFO_UNUSED, 51 { IWL_TX_FIFO_VI, 1, },
52 IWL_TX_FIFO_UNUSED, 52 { IWL_TX_FIFO_BE, 2, },
53 IWL_TX_FIFO_UNUSED, 53 { IWL_TX_FIFO_BK, 3, },
54 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
55 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
56 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
57 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
58 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
59 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
54}; 60};
55 61
56static const s8 iwlagn_ipan_queue_to_tx_fifo[] = { 62static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
57 IWL_TX_FIFO_VO, 63 { IWL_TX_FIFO_VO, 0, },
58 IWL_TX_FIFO_VI, 64 { IWL_TX_FIFO_VI, 1, },
59 IWL_TX_FIFO_BE, 65 { IWL_TX_FIFO_BE, 2, },
60 IWL_TX_FIFO_BK, 66 { IWL_TX_FIFO_BK, 3, },
61 IWL_TX_FIFO_BK_IPAN, 67 { IWL_TX_FIFO_BK_IPAN, 3, },
62 IWL_TX_FIFO_BE_IPAN, 68 { IWL_TX_FIFO_BE_IPAN, 2, },
63 IWL_TX_FIFO_VI_IPAN, 69 { IWL_TX_FIFO_VI_IPAN, 1, },
64 IWL_TX_FIFO_VO_IPAN, 70 { IWL_TX_FIFO_VO_IPAN, 0, },
65 IWL_TX_FIFO_BE_IPAN, 71 { IWL_TX_FIFO_BE_IPAN, 2, },
66 IWLAGN_CMD_FIFO_NUM, 72 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
67}; 73};
68 74
69static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { 75static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
@@ -429,7 +435,7 @@ void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
429 435
430int iwlagn_alive_notify(struct iwl_priv *priv) 436int iwlagn_alive_notify(struct iwl_priv *priv)
431{ 437{
432 const s8 *queues; 438 const struct queue_to_fifo_ac *queue_to_fifo;
433 u32 a; 439 u32 a;
434 unsigned long flags; 440 unsigned long flags;
435 int i, chan; 441 int i, chan;
@@ -492,9 +498,9 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
492 498
493 /* map queues to FIFOs */ 499 /* map queues to FIFOs */
494 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)) 500 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
495 queues = iwlagn_ipan_queue_to_tx_fifo; 501 queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
496 else 502 else
497 queues = iwlagn_default_queue_to_tx_fifo; 503 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
498 504
499 iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0); 505 iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);
500 506
@@ -510,14 +516,17 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
510 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10); 516 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
511 517
512 for (i = 0; i < 10; i++) { 518 for (i = 0; i < 10; i++) {
513 int ac = queues[i]; 519 int fifo = queue_to_fifo[i].fifo;
520 int ac = queue_to_fifo[i].ac;
514 521
515 iwl_txq_ctx_activate(priv, i); 522 iwl_txq_ctx_activate(priv, i);
516 523
517 if (ac == IWL_TX_FIFO_UNUSED) 524 if (fifo == IWL_TX_FIFO_UNUSED)
518 continue; 525 continue;
519 526
520 iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0); 527 if (ac != IWL_AC_UNSET)
528 iwl_set_swq_id(&priv->txq[i], ac, i);
529 iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
521 } 530 }
522 531
523 spin_unlock_irqrestore(&priv->lock, flags); 532 spin_unlock_irqrestore(&priv->lock, flags);
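
/*
 * Editor's sketch, not part of the patch: the change above turns the flat
 * queue->FIFO table into (fifo, ac) pairs so the alive handler can both
 * attach each TX queue to its hardware FIFO and, where an access category
 * applies, record the software queue id.  A minimal model of that walk,
 * with hypothetical example_* names; the -1 "unset"/"unused" markers are
 * stand-ins for IWL_AC_UNSET and IWL_TX_FIFO_UNUSED.
 */
#define EX_AC_UNSET	-1
#define EX_FIFO_UNUSED	-1

struct example_queue_map { signed char fifo, ac; };

static void example_map_queues(const struct example_queue_map *map, int n,
			       void (*activate)(int queue),
			       void (*set_swq)(int queue, int ac),
			       void (*set_fifo)(int queue, int fifo))
{
	int i;

	for (i = 0; i < n; i++) {
		activate(i);			/* queue exists either way     */
		if (map[i].fifo == EX_FIFO_UNUSED)
			continue;		/* nothing behind this queue   */
		if (map[i].ac != EX_AC_UNSET)
			set_swq(i, map[i].ac);	/* data queue: remember its AC */
		set_fifo(i, map[i].fifo);	/* hook queue to hardware FIFO */
	}
}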
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index c2636a7ab9e..5b96b0d8009 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -90,170 +90,6 @@ MODULE_ALIAS("iwl4965");
90static int iwlagn_ant_coupling; 90static int iwlagn_ant_coupling;
91static bool iwlagn_bt_ch_announce = 1; 91static bool iwlagn_bt_ch_announce = 1;
92 92
93/**
94 * iwlagn_commit_rxon - commit staging_rxon to hardware
95 *
96 * The RXON command in staging_rxon is committed to the hardware and
97 * the active_rxon structure is updated with the new data. This
98 * function correctly transitions out of the RXON_ASSOC_MSK state if
99 * a HW tune is required based on the RXON structure changes.
100 */
101int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
102{
103 /* cast away the const for active_rxon in this function */
104 struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
105 int ret;
106 bool new_assoc =
107 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
108 bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
109
110 if (!iwl_is_alive(priv))
111 return -EBUSY;
112
113 if (!ctx->is_active)
114 return 0;
115
116 /* always get timestamp with Rx frame */
117 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
118
119 ret = iwl_check_rxon_cmd(priv, ctx);
120 if (ret) {
121 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
122 return -EINVAL;
123 }
124
125 /*
126	 * On receiving a commit_rxon request,
127	 * abort any previous channel switch still in progress.
128 */
129 if (priv->switch_rxon.switch_in_progress &&
130 (priv->switch_rxon.channel != ctx->staging.channel)) {
131 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
132 le16_to_cpu(priv->switch_rxon.channel));
133 iwl_chswitch_done(priv, false);
134 }
135
136 /* If we don't need to send a full RXON, we can use
137 * iwl_rxon_assoc_cmd which is used to reconfigure filter
138 * and other flags for the current radio configuration. */
139 if (!iwl_full_rxon_required(priv, ctx)) {
140 ret = iwl_send_rxon_assoc(priv, ctx);
141 if (ret) {
142 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
143 return ret;
144 }
145
146 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
147 iwl_print_rx_config_cmd(priv, ctx);
148 return 0;
149 }
150
151 /* If we are currently associated and the new config requires
152 * an RXON_ASSOC and the new config wants the associated mask enabled,
153 * we must clear the associated from the active configuration
154 * before we apply the new config */
155 if (iwl_is_associated_ctx(ctx) && new_assoc) {
156 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
157 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
158
159 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
160 sizeof(struct iwl_rxon_cmd),
161 active_rxon);
162
163 /* If the mask clearing failed then we set
164 * active_rxon back to what it was previously */
165 if (ret) {
166 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
167 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
168 return ret;
169 }
170 iwl_clear_ucode_stations(priv, ctx);
171 iwl_restore_stations(priv, ctx);
172 ret = iwl_restore_default_wep_keys(priv, ctx);
173 if (ret) {
174 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
175 return ret;
176 }
177 }
178
179 IWL_DEBUG_INFO(priv, "Sending RXON\n"
180 "* with%s RXON_FILTER_ASSOC_MSK\n"
181 "* channel = %d\n"
182 "* bssid = %pM\n",
183 (new_assoc ? "" : "out"),
184 le16_to_cpu(ctx->staging.channel),
185 ctx->staging.bssid_addr);
186
187 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
188
189 if (!old_assoc) {
190 /*
191 * First of all, before setting associated, we need to
192 * send RXON timing so the device knows about the DTIM
193 * period and other timing values
194 */
195 ret = iwl_send_rxon_timing(priv, ctx);
196 if (ret) {
197 IWL_ERR(priv, "Error setting RXON timing!\n");
198 return ret;
199 }
200 }
201
202 if (priv->cfg->ops->hcmd->set_pan_params) {
203 ret = priv->cfg->ops->hcmd->set_pan_params(priv);
204 if (ret)
205 return ret;
206 }
207
208 /* Apply the new configuration
209 * RXON unassoc clears the station table in uCode so restoration of
210 * stations is needed after it (the RXON command) completes
211 */
212 if (!new_assoc) {
213 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
214 sizeof(struct iwl_rxon_cmd), &ctx->staging);
215 if (ret) {
216 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
217 return ret;
218 }
219 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
220 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
221 iwl_clear_ucode_stations(priv, ctx);
222 iwl_restore_stations(priv, ctx);
223 ret = iwl_restore_default_wep_keys(priv, ctx);
224 if (ret) {
225 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
226 return ret;
227 }
228 }
229 if (new_assoc) {
230 priv->start_calib = 0;
231 /* Apply the new configuration
232 * RXON assoc doesn't clear the station table in uCode,
233 */
234 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
235 sizeof(struct iwl_rxon_cmd), &ctx->staging);
236 if (ret) {
237 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
238 return ret;
239 }
240 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
241 }
242 iwl_print_rx_config_cmd(priv, ctx);
243
244 iwl_init_sensitivity(priv);
245
246	/* If we issue a new RXON command which requires a tune then we must
247 * send a new TXPOWER command or we won't be able to Tx any frames */
248 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
249 if (ret) {
250 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
251 return ret;
252 }
253
254 return 0;
255}
256
257void iwl_update_chain_flags(struct iwl_priv *priv) 93void iwl_update_chain_flags(struct iwl_priv *priv)
258{ 94{
259 struct iwl_rxon_context *ctx; 95 struct iwl_rxon_context *ctx;
@@ -261,7 +97,8 @@ void iwl_update_chain_flags(struct iwl_priv *priv)
261 if (priv->cfg->ops->hcmd->set_rxon_chain) { 97 if (priv->cfg->ops->hcmd->set_rxon_chain) {
262 for_each_context(priv, ctx) { 98 for_each_context(priv, ctx) {
263 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 99 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
264 iwlcore_commit_rxon(priv, ctx); 100 if (ctx->active.rx_chain != ctx->staging.rx_chain)
101 iwlcore_commit_rxon(priv, ctx);
265 } 102 }
266 } 103 }
267} 104}
@@ -411,7 +248,8 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
411 248
412 return sizeof(*tx_beacon_cmd) + frame_size; 249 return sizeof(*tx_beacon_cmd) + frame_size;
413} 250}
414static int iwl_send_beacon_cmd(struct iwl_priv *priv) 251
252int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
415{ 253{
416 struct iwl_frame *frame; 254 struct iwl_frame *frame;
417 unsigned int frame_size; 255 unsigned int frame_size;
@@ -661,7 +499,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
661 499
662 priv->beacon_skb = beacon; 500 priv->beacon_skb = beacon;
663 501
664 iwl_send_beacon_cmd(priv); 502 iwlagn_send_beacon_cmd(priv);
665 out: 503 out:
666 mutex_unlock(&priv->mutex); 504 mutex_unlock(&priv->mutex);
667} 505}
@@ -2879,6 +2717,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
2879 2717
2880 iwl_reset_run_time_calib(priv); 2718 iwl_reset_run_time_calib(priv);
2881 2719
2720 set_bit(STATUS_READY, &priv->status);
2721
2882 /* Configure the adapter for unassociated operation */ 2722 /* Configure the adapter for unassociated operation */
2883 iwlcore_commit_rxon(priv, ctx); 2723 iwlcore_commit_rxon(priv, ctx);
2884 2724
@@ -2888,7 +2728,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
2888 iwl_leds_init(priv); 2728 iwl_leds_init(priv);
2889 2729
2890 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2730 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2891 set_bit(STATUS_READY, &priv->status);
2892 wake_up_interruptible(&priv->wait_command_queue); 2731 wake_up_interruptible(&priv->wait_command_queue);
2893 2732
2894 iwl_power_update_mode(priv, true); 2733 iwl_power_update_mode(priv, true);
@@ -2978,7 +2817,8 @@ static void __iwl_down(struct iwl_priv *priv)
2978 STATUS_EXIT_PENDING; 2817 STATUS_EXIT_PENDING;
2979 2818
2980 /* device going down, Stop using ICT table */ 2819 /* device going down, Stop using ICT table */
2981 iwl_disable_ict(priv); 2820 if (priv->cfg->ops->lib->isr_ops.disable)
2821 priv->cfg->ops->lib->isr_ops.disable(priv);
2982 2822
2983 iwlagn_txq_ctx_stop(priv); 2823 iwlagn_txq_ctx_stop(priv);
2984 iwlagn_rxq_stop(priv); 2824 iwlagn_rxq_stop(priv);
@@ -3201,7 +3041,8 @@ static void iwl_bg_alive_start(struct work_struct *data)
3201 return; 3041 return;
3202 3042
3203 /* enable dram interrupt */ 3043 /* enable dram interrupt */
3204 iwl_reset_ict(priv); 3044 if (priv->cfg->ops->lib->isr_ops.reset)
3045 priv->cfg->ops->lib->isr_ops.reset(priv);
3205 3046
3206 mutex_lock(&priv->mutex); 3047 mutex_lock(&priv->mutex);
3207 iwl_alive_start(priv); 3048 iwl_alive_start(priv);
@@ -3309,92 +3150,6 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
3309 mutex_unlock(&priv->mutex); 3150 mutex_unlock(&priv->mutex);
3310} 3151}
3311 3152
3312#define IWL_DELAY_NEXT_SCAN (HZ*2)
3313
3314void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3315{
3316 struct iwl_rxon_context *ctx;
3317 struct ieee80211_conf *conf = NULL;
3318 int ret = 0;
3319
3320 if (!vif || !priv->is_open)
3321 return;
3322
3323 ctx = iwl_rxon_ctx_from_vif(vif);
3324
3325 if (vif->type == NL80211_IFTYPE_AP) {
3326 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
3327 return;
3328 }
3329
3330 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3331 return;
3332
3333 iwl_scan_cancel_timeout(priv, 200);
3334
3335 conf = ieee80211_get_hw_conf(priv->hw);
3336
3337 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3338 iwlcore_commit_rxon(priv, ctx);
3339
3340 ret = iwl_send_rxon_timing(priv, ctx);
3341 if (ret)
3342 IWL_WARN(priv, "RXON timing - "
3343 "Attempting to continue.\n");
3344
3345 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3346
3347 iwl_set_rxon_ht(priv, &priv->current_ht_config);
3348
3349 if (priv->cfg->ops->hcmd->set_rxon_chain)
3350 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
3351
3352 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
3353
3354 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3355 vif->bss_conf.aid, vif->bss_conf.beacon_int);
3356
3357 if (vif->bss_conf.use_short_preamble)
3358 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3359 else
3360 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3361
3362 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3363 if (vif->bss_conf.use_short_slot)
3364 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3365 else
3366 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3367 }
3368
3369 iwlcore_commit_rxon(priv, ctx);
3370
3371 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3372 vif->bss_conf.aid, ctx->active.bssid_addr);
3373
3374 switch (vif->type) {
3375 case NL80211_IFTYPE_STATION:
3376 break;
3377 case NL80211_IFTYPE_ADHOC:
3378 iwl_send_beacon_cmd(priv);
3379 break;
3380 default:
3381 IWL_ERR(priv, "%s Should not be called in %d mode\n",
3382 __func__, vif->type);
3383 break;
3384 }
3385
3386	/* the chain noise calibration will enable PM upon completion
3387 * If chain noise has already been run, then we need to enable
3388 * power management here */
3389 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
3390 iwl_power_update_mode(priv, false);
3391
3392 /* Enable Rx differential gain and sensitivity calibrations */
3393 iwl_chain_noise_reset(priv);
3394 priv->start_calib = 1;
3395
3396}
3397
3398/***************************************************************************** 3153/*****************************************************************************
3399 * 3154 *
3400 * mac80211 entry point functions 3155 * mac80211 entry point functions
@@ -3474,7 +3229,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3474} 3229}
3475 3230
3476 3231
3477static int iwl_mac_start(struct ieee80211_hw *hw) 3232int iwlagn_mac_start(struct ieee80211_hw *hw)
3478{ 3233{
3479 struct iwl_priv *priv = hw->priv; 3234 struct iwl_priv *priv = hw->priv;
3480 int ret; 3235 int ret;
@@ -3515,7 +3270,7 @@ out:
3515 return 0; 3270 return 0;
3516} 3271}
3517 3272
3518static void iwl_mac_stop(struct ieee80211_hw *hw) 3273void iwlagn_mac_stop(struct ieee80211_hw *hw)
3519{ 3274{
3520 struct iwl_priv *priv = hw->priv; 3275 struct iwl_priv *priv = hw->priv;
3521 3276
@@ -3537,7 +3292,7 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
3537 IWL_DEBUG_MAC80211(priv, "leave\n"); 3292 IWL_DEBUG_MAC80211(priv, "leave\n");
3538} 3293}
3539 3294
3540static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 3295int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3541{ 3296{
3542 struct iwl_priv *priv = hw->priv; 3297 struct iwl_priv *priv = hw->priv;
3543 3298
@@ -3553,73 +3308,12 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3553 return NETDEV_TX_OK; 3308 return NETDEV_TX_OK;
3554} 3309}
3555 3310
3556void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) 3311void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
3557{ 3312 struct ieee80211_vif *vif,
3558 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 3313 struct ieee80211_key_conf *keyconf,
3559 int ret = 0; 3314 struct ieee80211_sta *sta,
3560 3315 u32 iv32, u16 *phase1key)
3561 lockdep_assert_held(&priv->mutex);
3562
3563 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3564 return;
3565
3566 /* The following should be done only at AP bring up */
3567 if (!iwl_is_associated_ctx(ctx)) {
3568
3569 /* RXON - unassoc (to set timing command) */
3570 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3571 iwlcore_commit_rxon(priv, ctx);
3572
3573 /* RXON Timing */
3574 ret = iwl_send_rxon_timing(priv, ctx);
3575 if (ret)
3576 IWL_WARN(priv, "RXON timing failed - "
3577 "Attempting to continue.\n");
3578
3579 /* AP has all antennas */
3580 priv->chain_noise_data.active_chains =
3581 priv->hw_params.valid_rx_ant;
3582 iwl_set_rxon_ht(priv, &priv->current_ht_config);
3583 if (priv->cfg->ops->hcmd->set_rxon_chain)
3584 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
3585
3586 ctx->staging.assoc_id = 0;
3587
3588 if (vif->bss_conf.use_short_preamble)
3589 ctx->staging.flags |=
3590 RXON_FLG_SHORT_PREAMBLE_MSK;
3591 else
3592 ctx->staging.flags &=
3593 ~RXON_FLG_SHORT_PREAMBLE_MSK;
3594
3595 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3596 if (vif->bss_conf.use_short_slot)
3597 ctx->staging.flags |=
3598 RXON_FLG_SHORT_SLOT_MSK;
3599 else
3600 ctx->staging.flags &=
3601 ~RXON_FLG_SHORT_SLOT_MSK;
3602 }
3603 /* need to send beacon cmd before committing assoc RXON! */
3604 iwl_send_beacon_cmd(priv);
3605 /* restore RXON assoc */
3606 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3607 iwlcore_commit_rxon(priv, ctx);
3608 }
3609 iwl_send_beacon_cmd(priv);
3610
3611 /* FIXME - we need to add code here to detect a totally new
3612 * configuration, reset the AP, unassoc, rxon timing, assoc,
3613 * clear sta table, add BCAST sta... */
3614}
3615
3616static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
3617 struct ieee80211_vif *vif,
3618 struct ieee80211_key_conf *keyconf,
3619 struct ieee80211_sta *sta,
3620 u32 iv32, u16 *phase1key)
3621{ 3316{
3622
3623 struct iwl_priv *priv = hw->priv; 3317 struct iwl_priv *priv = hw->priv;
3624 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 3318 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
3625 3319
@@ -3631,10 +3325,9 @@ static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
3631 IWL_DEBUG_MAC80211(priv, "leave\n"); 3325 IWL_DEBUG_MAC80211(priv, "leave\n");
3632} 3326}
3633 3327
3634static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3328int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3635 struct ieee80211_vif *vif, 3329 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
3636 struct ieee80211_sta *sta, 3330 struct ieee80211_key_conf *key)
3637 struct ieee80211_key_conf *key)
3638{ 3331{
3639 struct iwl_priv *priv = hw->priv; 3332 struct iwl_priv *priv = hw->priv;
3640 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 3333 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -3701,10 +3394,10 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3701 return ret; 3394 return ret;
3702} 3395}
3703 3396
3704static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, 3397int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
3705 struct ieee80211_vif *vif, 3398 struct ieee80211_vif *vif,
3706 enum ieee80211_ampdu_mlme_action action, 3399 enum ieee80211_ampdu_mlme_action action,
3707 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 3400 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3708{ 3401{
3709 struct iwl_priv *priv = hw->priv; 3402 struct iwl_priv *priv = hw->priv;
3710 int ret = -EINVAL; 3403 int ret = -EINVAL;
@@ -3785,39 +3478,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3785 return ret; 3478 return ret;
3786} 3479}
3787 3480
3788static void iwl_mac_sta_notify(struct ieee80211_hw *hw, 3481int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3789 struct ieee80211_vif *vif, 3482 struct ieee80211_vif *vif,
3790 enum sta_notify_cmd cmd, 3483 struct ieee80211_sta *sta)
3791 struct ieee80211_sta *sta)
3792{
3793 struct iwl_priv *priv = hw->priv;
3794 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
3795 int sta_id;
3796
3797 switch (cmd) {
3798 case STA_NOTIFY_SLEEP:
3799 WARN_ON(!sta_priv->client);
3800 sta_priv->asleep = true;
3801 if (atomic_read(&sta_priv->pending_frames) > 0)
3802 ieee80211_sta_block_awake(hw, sta, true);
3803 break;
3804 case STA_NOTIFY_AWAKE:
3805 WARN_ON(!sta_priv->client);
3806 if (!sta_priv->asleep)
3807 break;
3808 sta_priv->asleep = false;
3809 sta_id = iwl_sta_id(sta);
3810 if (sta_id != IWL_INVALID_STATION)
3811 iwl_sta_modify_ps_wake(priv, sta_id);
3812 break;
3813 default:
3814 break;
3815 }
3816}
3817
3818static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3819 struct ieee80211_vif *vif,
3820 struct ieee80211_sta *sta)
3821{ 3484{
3822 struct iwl_priv *priv = hw->priv; 3485 struct iwl_priv *priv = hw->priv;
3823 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; 3486 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
@@ -3858,8 +3521,8 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3858 return 0; 3521 return 0;
3859} 3522}
3860 3523
3861static void iwl_mac_channel_switch(struct ieee80211_hw *hw, 3524void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
3862 struct ieee80211_channel_switch *ch_switch) 3525 struct ieee80211_channel_switch *ch_switch)
3863{ 3526{
3864 struct iwl_priv *priv = hw->priv; 3527 struct iwl_priv *priv = hw->priv;
3865 const struct iwl_channel_info *ch_info; 3528 const struct iwl_channel_info *ch_info;
@@ -3956,10 +3619,10 @@ out_exit:
3956 IWL_DEBUG_MAC80211(priv, "leave\n"); 3619 IWL_DEBUG_MAC80211(priv, "leave\n");
3957} 3620}
3958 3621
3959static void iwlagn_configure_filter(struct ieee80211_hw *hw, 3622void iwlagn_configure_filter(struct ieee80211_hw *hw,
3960 unsigned int changed_flags, 3623 unsigned int changed_flags,
3961 unsigned int *total_flags, 3624 unsigned int *total_flags,
3962 u64 multicast) 3625 u64 multicast)
3963{ 3626{
3964 struct iwl_priv *priv = hw->priv; 3627 struct iwl_priv *priv = hw->priv;
3965 __le32 filter_or = 0, filter_nand = 0; 3628 __le32 filter_or = 0, filter_nand = 0;
@@ -3986,7 +3649,11 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3986 for_each_context(priv, ctx) { 3649 for_each_context(priv, ctx) {
3987 ctx->staging.filter_flags &= ~filter_nand; 3650 ctx->staging.filter_flags &= ~filter_nand;
3988 ctx->staging.filter_flags |= filter_or; 3651 ctx->staging.filter_flags |= filter_or;
3989 iwlcore_commit_rxon(priv, ctx); 3652
3653 /*
3654 * Not committing directly because hardware can perform a scan,
3655 * but we'll eventually commit the filter flags change anyway.
3656 */
3990 } 3657 }
3991 3658
3992 mutex_unlock(&priv->mutex); 3659 mutex_unlock(&priv->mutex);
@@ -4001,7 +3668,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
4001 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 3668 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
4002} 3669}
4003 3670
4004static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop) 3671void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
4005{ 3672{
4006 struct iwl_priv *priv = hw->priv; 3673 struct iwl_priv *priv = hw->priv;
4007 3674
@@ -4172,13 +3839,13 @@ static int iwl_init_drv(struct iwl_priv *priv)
4172 priv->bt_on_thresh = BT_ON_THRESHOLD_DEF; 3839 priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
4173 priv->bt_duration = BT_DURATION_LIMIT_DEF; 3840 priv->bt_duration = BT_DURATION_LIMIT_DEF;
4174 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF; 3841 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
4175 priv->dynamic_agg_thresh = BT_AGG_THRESHOLD_DEF;
4176 } 3842 }
4177 3843
4178 /* Set the tx_power_user_lmt to the lowest power level 3844 /* Set the tx_power_user_lmt to the lowest power level
4179 * this value will get overwritten by channel max power avg 3845 * this value will get overwritten by channel max power avg
4180 * from eeprom */ 3846 * from eeprom */
4181 priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN; 3847 priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
3848 priv->tx_power_next = IWLAGN_TX_POWER_TARGET_POWER_MIN;
4182 3849
4183 ret = iwl_init_channel_map(priv); 3850 ret = iwl_init_channel_map(priv);
4184 if (ret) { 3851 if (ret) {
@@ -4209,28 +3876,30 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
4209 kfree(priv->scan_cmd); 3876 kfree(priv->scan_cmd);
4210} 3877}
4211 3878
4212static struct ieee80211_ops iwl_hw_ops = { 3879#ifdef CONFIG_IWL5000
4213 .tx = iwl_mac_tx, 3880struct ieee80211_ops iwlagn_hw_ops = {
4214 .start = iwl_mac_start, 3881 .tx = iwlagn_mac_tx,
4215 .stop = iwl_mac_stop, 3882 .start = iwlagn_mac_start,
3883 .stop = iwlagn_mac_stop,
4216 .add_interface = iwl_mac_add_interface, 3884 .add_interface = iwl_mac_add_interface,
4217 .remove_interface = iwl_mac_remove_interface, 3885 .remove_interface = iwl_mac_remove_interface,
4218 .config = iwl_mac_config, 3886 .change_interface = iwl_mac_change_interface,
3887 .config = iwlagn_mac_config,
4219 .configure_filter = iwlagn_configure_filter, 3888 .configure_filter = iwlagn_configure_filter,
4220 .set_key = iwl_mac_set_key, 3889 .set_key = iwlagn_mac_set_key,
4221 .update_tkip_key = iwl_mac_update_tkip_key, 3890 .update_tkip_key = iwlagn_mac_update_tkip_key,
4222 .conf_tx = iwl_mac_conf_tx, 3891 .conf_tx = iwl_mac_conf_tx,
4223 .reset_tsf = iwl_mac_reset_tsf, 3892 .bss_info_changed = iwlagn_bss_info_changed,
4224 .bss_info_changed = iwl_bss_info_changed, 3893 .ampdu_action = iwlagn_mac_ampdu_action,
4225 .ampdu_action = iwl_mac_ampdu_action,
4226 .hw_scan = iwl_mac_hw_scan, 3894 .hw_scan = iwl_mac_hw_scan,
4227 .sta_notify = iwl_mac_sta_notify, 3895 .sta_notify = iwlagn_mac_sta_notify,
4228 .sta_add = iwlagn_mac_sta_add, 3896 .sta_add = iwlagn_mac_sta_add,
4229 .sta_remove = iwl_mac_sta_remove, 3897 .sta_remove = iwl_mac_sta_remove,
4230 .channel_switch = iwl_mac_channel_switch, 3898 .channel_switch = iwlagn_mac_channel_switch,
4231 .flush = iwl_mac_flush, 3899 .flush = iwlagn_mac_flush,
4232 .tx_last_beacon = iwl_mac_tx_last_beacon, 3900 .tx_last_beacon = iwl_mac_tx_last_beacon,
4233}; 3901};
3902#endif
4234 3903
4235static void iwl_hw_detect(struct iwl_priv *priv) 3904static void iwl_hw_detect(struct iwl_priv *priv)
4236{ 3905{
@@ -4298,10 +3967,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4298 if (cfg->mod_params->disable_hw_scan) { 3967 if (cfg->mod_params->disable_hw_scan) {
4299 dev_printk(KERN_DEBUG, &(pdev->dev), 3968 dev_printk(KERN_DEBUG, &(pdev->dev),
4300 "sw scan support is deprecated\n"); 3969 "sw scan support is deprecated\n");
4301 iwl_hw_ops.hw_scan = NULL; 3970#ifdef CONFIG_IWL5000
3971 iwlagn_hw_ops.hw_scan = NULL;
3972#endif
3973#ifdef CONFIG_IWL4965
3974 iwl4965_hw_ops.hw_scan = NULL;
3975#endif
4302 } 3976 }
4303 3977
4304 hw = iwl_alloc_all(cfg, &iwl_hw_ops); 3978 hw = iwl_alloc_all(cfg);
4305 if (!hw) { 3979 if (!hw) {
4306 err = -ENOMEM; 3980 err = -ENOMEM;
4307 goto out; 3981 goto out;
@@ -4333,6 +4007,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4333 BIT(NL80211_IFTYPE_ADHOC); 4007 BIT(NL80211_IFTYPE_ADHOC);
4334 priv->contexts[IWL_RXON_CTX_BSS].interface_modes = 4008 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
4335 BIT(NL80211_IFTYPE_STATION); 4009 BIT(NL80211_IFTYPE_STATION);
4010 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
4336 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; 4011 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
4337 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; 4012 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
4338 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; 4013 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
@@ -4461,6 +4136,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4461 if (err) 4136 if (err)
4462 goto out_free_eeprom; 4137 goto out_free_eeprom;
4463 4138
4139 err = iwl_eeprom_check_sku(priv);
4140 if (err)
4141 goto out_free_eeprom;
4142
4464 /* extract MAC Address */ 4143 /* extract MAC Address */
4465 iwl_eeprom_get_mac(priv, priv->addresses[0].addr); 4144 iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
4466 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); 4145 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
@@ -4500,8 +4179,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4500 4179
4501 pci_enable_msi(priv->pci_dev); 4180 pci_enable_msi(priv->pci_dev);
4502 4181
4503 iwl_alloc_isr_ict(priv); 4182 if (priv->cfg->ops->lib->isr_ops.alloc)
4504 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr, 4183 priv->cfg->ops->lib->isr_ops.alloc(priv);
4184
4185 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr,
4505 IRQF_SHARED, DRV_NAME, priv); 4186 IRQF_SHARED, DRV_NAME, priv);
4506 if (err) { 4187 if (err) {
4507 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); 4188 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4548,7 +4229,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4548 destroy_workqueue(priv->workqueue); 4229 destroy_workqueue(priv->workqueue);
4549 priv->workqueue = NULL; 4230 priv->workqueue = NULL;
4550 free_irq(priv->pci_dev->irq, priv); 4231 free_irq(priv->pci_dev->irq, priv);
4551 iwl_free_isr_ict(priv); 4232 if (priv->cfg->ops->lib->isr_ops.free)
4233 priv->cfg->ops->lib->isr_ops.free(priv);
4552 out_disable_msi: 4234 out_disable_msi:
4553 pci_disable_msi(priv->pci_dev); 4235 pci_disable_msi(priv->pci_dev);
4554 iwl_uninit_drv(priv); 4236 iwl_uninit_drv(priv);
@@ -4643,7 +4325,8 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4643 4325
4644 iwl_uninit_drv(priv); 4326 iwl_uninit_drv(priv);
4645 4327
4646 iwl_free_isr_ict(priv); 4328 if (priv->cfg->ops->lib->isr_ops.free)
4329 priv->cfg->ops->lib->isr_ops.free(priv);
4647 4330
4648 dev_kfree_skb(priv->beacon_skb); 4331 dev_kfree_skb(priv->beacon_skb);
4649 4332
@@ -4735,13 +4418,6 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4735 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, 4418 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
4736 4419
4737/* 6x00 Series Gen2a */ 4420/* 6x00 Series Gen2a */
4738 {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
4739 {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
4740 {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
4741 {IWL_PCI_DEVICE(0x0082, 0x1206, iwl6000g2a_2abg_cfg)},
4742 {IWL_PCI_DEVICE(0x0085, 0x1216, iwl6000g2a_2abg_cfg)},
4743 {IWL_PCI_DEVICE(0x0082, 0x1226, iwl6000g2a_2abg_cfg)},
4744 {IWL_PCI_DEVICE(0x0082, 0x1207, iwl6000g2a_2bg_cfg)},
4745 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2a_2agn_cfg)}, 4421 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2a_2agn_cfg)},
4746 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6000g2a_2abg_cfg)}, 4422 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6000g2a_2abg_cfg)},
4747 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6000g2a_2bg_cfg)}, 4423 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6000g2a_2bg_cfg)},
@@ -4751,24 +4427,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4751 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6000g2a_2abg_cfg)}, 4427 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6000g2a_2abg_cfg)},
4752 4428
4753/* 6x00 Series Gen2b */ 4429/* 6x00 Series Gen2b */
4754 {IWL_PCI_DEVICE(0x008F, 0x5105, iwl6000g2b_bgn_cfg)},
4755 {IWL_PCI_DEVICE(0x0090, 0x5115, iwl6000g2b_bgn_cfg)},
4756 {IWL_PCI_DEVICE(0x008F, 0x5125, iwl6000g2b_bgn_cfg)},
4757 {IWL_PCI_DEVICE(0x008F, 0x5107, iwl6000g2b_bg_cfg)},
4758 {IWL_PCI_DEVICE(0x008F, 0x5201, iwl6000g2b_2agn_cfg)},
4759 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
4760 {IWL_PCI_DEVICE(0x008F, 0x5221, iwl6000g2b_2agn_cfg)},
4761 {IWL_PCI_DEVICE(0x008F, 0x5206, iwl6000g2b_2abg_cfg)},
4762 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
4763 {IWL_PCI_DEVICE(0x008F, 0x5226, iwl6000g2b_2abg_cfg)},
4764 {IWL_PCI_DEVICE(0x008F, 0x5207, iwl6000g2b_2bg_cfg)},
4765 {IWL_PCI_DEVICE(0x008A, 0x5301, iwl6000g2b_bgn_cfg)},
4766 {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6000g2b_bgn_cfg)}, 4430 {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6000g2b_bgn_cfg)},
4767 {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6000g2b_bg_cfg)}, 4431 {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6000g2b_bg_cfg)},
4768 {IWL_PCI_DEVICE(0x008A, 0x5321, iwl6000g2b_bgn_cfg)},
4769 {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6000g2b_bgn_cfg)}, 4432 {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6000g2b_bgn_cfg)},
4770 {IWL_PCI_DEVICE(0x008B, 0x5311, iwl6000g2b_bgn_cfg)}, 4433 {IWL_PCI_DEVICE(0x008A, 0x5327, iwl6000g2b_bg_cfg)},
4771 {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6000g2b_bgn_cfg)}, 4434 {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6000g2b_bgn_cfg)},
4435 {IWL_PCI_DEVICE(0x008B, 0x5317, iwl6000g2b_bg_cfg)},
4772 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)}, 4436 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
4773 {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6000g2b_2bgn_cfg)}, 4437 {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6000g2b_2bgn_cfg)},
4774 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)}, 4438 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
@@ -4812,10 +4476,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4812 4476
4813/* 100 Series WiFi */ 4477/* 100 Series WiFi */
4814 {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, 4478 {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
4479 {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
4815 {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, 4480 {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
4481 {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
4816 {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, 4482 {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
4817 {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, 4483 {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
4818 {IWL_PCI_DEVICE(0x08AE, 0x1017, iwl100_bg_cfg)},
4819 4484
4820/* 130 Series WiFi */ 4485/* 130 Series WiFi */
4821 {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, 4486 {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
@@ -4836,10 +4501,7 @@ static struct pci_driver iwl_driver = {
4836 .id_table = iwl_hw_card_ids, 4501 .id_table = iwl_hw_card_ids,
4837 .probe = iwl_pci_probe, 4502 .probe = iwl_pci_probe,
4838 .remove = __devexit_p(iwl_pci_remove), 4503 .remove = __devexit_p(iwl_pci_remove),
4839#ifdef CONFIG_PM 4504 .driver.pm = IWL_PM_OPS,
4840 .suspend = iwl_pci_suspend,
4841 .resume = iwl_pci_resume,
4842#endif
4843}; 4505};
4844 4506
4845static int __init iwl_init(void) 4507static int __init iwl_init(void)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index f525d55f2c0..28837a185a2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -102,6 +102,9 @@ extern struct iwl_hcmd_ops iwlagn_hcmd;
102extern struct iwl_hcmd_ops iwlagn_bt_hcmd; 102extern struct iwl_hcmd_ops iwlagn_bt_hcmd;
103extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils; 103extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
104 104
105extern struct ieee80211_ops iwlagn_hw_ops;
106extern struct ieee80211_ops iwl4965_hw_ops;
107
105int iwl_reset_ict(struct iwl_priv *priv); 108int iwl_reset_ict(struct iwl_priv *priv);
106void iwl_disable_ict(struct iwl_priv *priv); 109void iwl_disable_ict(struct iwl_priv *priv);
107int iwl_alloc_isr_ict(struct iwl_priv *priv); 110int iwl_alloc_isr_ict(struct iwl_priv *priv);
@@ -132,6 +135,11 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
132/* RXON */ 135/* RXON */
133int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 136int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
134void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 137void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
138int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
139void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
140 struct ieee80211_vif *vif,
141 struct ieee80211_bss_conf *bss_conf,
142 u32 changes);
135 143
136/* uCode */ 144/* uCode */
137int iwlagn_load_ucode(struct iwl_priv *priv); 145int iwlagn_load_ucode(struct iwl_priv *priv);
@@ -249,6 +257,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
249int iwlagn_send_rxon_assoc(struct iwl_priv *priv, 257int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
250 struct iwl_rxon_context *ctx); 258 struct iwl_rxon_context *ctx);
251int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant); 259int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
260int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
252 261
253/* bt coex */ 262/* bt coex */
254void iwlagn_send_advance_bt_config(struct iwl_priv *priv); 263void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
@@ -292,9 +301,12 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
292 int tid, u16 ssn); 301 int tid, u16 ssn);
293int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, 302int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
294 int tid); 303 int tid);
295void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id);
296void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt); 304void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
297int iwl_update_bcast_stations(struct iwl_priv *priv); 305int iwl_update_bcast_stations(struct iwl_priv *priv);
306void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
307 struct ieee80211_vif *vif,
308 enum sta_notify_cmd cmd,
309 struct ieee80211_sta *sta);
298 310
299/* rate */ 311/* rate */
300static inline u32 iwl_ant_idx_to_flags(u8 ant_idx) 312static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
@@ -318,4 +330,31 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
318int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv); 330int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
319void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv); 331void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
320 332
333/* mac80211 handlers (for 4965) */
334int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
335int iwlagn_mac_start(struct ieee80211_hw *hw);
336void iwlagn_mac_stop(struct ieee80211_hw *hw);
337void iwlagn_configure_filter(struct ieee80211_hw *hw,
338 unsigned int changed_flags,
339 unsigned int *total_flags,
340 u64 multicast);
341int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
342 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
343 struct ieee80211_key_conf *key);
344void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
345 struct ieee80211_vif *vif,
346 struct ieee80211_key_conf *keyconf,
347 struct ieee80211_sta *sta,
348 u32 iv32, u16 *phase1key);
349int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
350 struct ieee80211_vif *vif,
351 enum ieee80211_ampdu_mlme_action action,
352 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
353int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
354 struct ieee80211_vif *vif,
355 struct ieee80211_sta *sta);
356void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
357 struct ieee80211_channel_switch *ch_switch);
358void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
359
321#endif /* __iwl_agn_h__ */ 360#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 424801abc80..c9448cba1e2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -2022,6 +2022,9 @@ struct iwl_compressed_ba_resp {
2022 __le64 bitmap; 2022 __le64 bitmap;
2023 __le16 scd_flow; 2023 __le16 scd_flow;
2024 __le16 scd_ssn; 2024 __le16 scd_ssn;
2025 /* following only for 5000 series and up */
2026 u8 txed; /* number of frames sent */
2027 u8 txed_2_done; /* number of frames acked */
2025} __packed; 2028} __packed;
2026 2029
2027/* 2030/*
@@ -2407,9 +2410,9 @@ struct iwl_link_quality_cmd {
2407#define BT_FRAG_THRESHOLD_MAX 0 2410#define BT_FRAG_THRESHOLD_MAX 0
2408#define BT_FRAG_THRESHOLD_MIN 0 2411#define BT_FRAG_THRESHOLD_MIN 0
2409 2412
2410#define BT_AGG_THRESHOLD_DEF 0 2413#define BT_AGG_THRESHOLD_DEF 1200
2411#define BT_AGG_THRESHOLD_MAX 0 2414#define BT_AGG_THRESHOLD_MAX 8000
2412#define BT_AGG_THRESHOLD_MIN 0 2415#define BT_AGG_THRESHOLD_MIN 400
2413 2416
2414/* 2417/*
2415 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) 2418 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
@@ -2436,8 +2439,9 @@ struct iwl_bt_cmd {
2436#define IWLAGN_BT_FLAG_COEX_MODE_3W 2 2439#define IWLAGN_BT_FLAG_COEX_MODE_3W 2
2437#define IWLAGN_BT_FLAG_COEX_MODE_4W 3 2440#define IWLAGN_BT_FLAG_COEX_MODE_4W 3
2438 2441
2439#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6) 2442#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6)
2440#define IWLAGN_BT_FLAG_NOCOEX_NOTIF BIT(7) 2443/* Disable Sync PSPoll on SCO/eSCO */
2444#define IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE BIT(7)
2441 2445
2442#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF 2446#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF
2443#define IWLAGN_BT_PRIO_BOOST_MIN 0x00 2447#define IWLAGN_BT_PRIO_BOOST_MIN 0x00
@@ -2447,8 +2451,8 @@ struct iwl_bt_cmd {
2447 2451
2448#define IWLAGN_BT3_T7_DEFAULT 1 2452#define IWLAGN_BT3_T7_DEFAULT 1
2449 2453
2450#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffffffff) 2454#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffff0000)
2451#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffffffff) 2455#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffff0000)
2452 2456
2453#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2 2457#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2
2454 2458
@@ -2664,9 +2668,16 @@ struct iwl_spectrum_notification {
2664#define IWL_POWER_VEC_SIZE 5 2668#define IWL_POWER_VEC_SIZE 5
2665 2669
2666#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) 2670#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2671#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0))
2672#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1))
2667#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2)) 2673#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
2668#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) 2674#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2669#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4)) 2675#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4))
2676#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5))
2677#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6))
2678#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7))
2679#define IWL_POWER_BT_SCO_ENA cpu_to_le16(BIT(8))
2680#define IWL_POWER_ADVANCE_PM_ENA_MSK cpu_to_le16(BIT(9))
2670 2681
2671struct iwl3945_powertable_cmd { 2682struct iwl3945_powertable_cmd {
2672 __le16 flags; 2683 __le16 flags;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 25fb3912342..c41f5a87821 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -77,15 +77,15 @@ EXPORT_SYMBOL(iwl_bcast_addr);
77 77
78 78
79/* This function both allocates and initializes hw and priv. */ 79/* This function both allocates and initializes hw and priv. */
80struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 80struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
81 struct ieee80211_ops *hw_ops)
82{ 81{
83 struct iwl_priv *priv; 82 struct iwl_priv *priv;
84
85 /* mac80211 allocates memory for this device instance, including 83 /* mac80211 allocates memory for this device instance, including
86 * space for this driver's private structure */ 84 * space for this driver's private structure */
87 struct ieee80211_hw *hw = 85 struct ieee80211_hw *hw;
88 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops); 86
87 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
88 cfg->ops->ieee80211_ops);
89 if (hw == NULL) { 89 if (hw == NULL) {
90 pr_err("%s: Can not allocate network device\n", 90 pr_err("%s: Can not allocate network device\n",
91 cfg->name); 91 cfg->name);
@@ -100,35 +100,6 @@ out:
100} 100}
101EXPORT_SYMBOL(iwl_alloc_all); 101EXPORT_SYMBOL(iwl_alloc_all);
102 102
103/*
104 * QoS support
105*/
106static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
107{
108 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
109 return;
110
111 if (!ctx->is_active)
112 return;
113
114 ctx->qos_data.def_qos_parm.qos_flags = 0;
115
116 if (ctx->qos_data.qos_active)
117 ctx->qos_data.def_qos_parm.qos_flags |=
118 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
119
120 if (ctx->ht.enabled)
121 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
122
123 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
124 ctx->qos_data.qos_active,
125 ctx->qos_data.def_qos_parm.qos_flags);
126
127 iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
128 sizeof(struct iwl_qosparam_cmd),
129 &ctx->qos_data.def_qos_parm, NULL);
130}
131
132#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 103#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
133#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 104#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
134static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv, 105static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
@@ -317,40 +288,6 @@ void iwlcore_free_geos(struct iwl_priv *priv)
317} 288}
318EXPORT_SYMBOL(iwlcore_free_geos); 289EXPORT_SYMBOL(iwlcore_free_geos);
319 290
320/*
321 * iwlcore_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
322 * function.
323 */
324void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
325 struct ieee80211_tx_info *info,
326 __le16 fc, __le32 *tx_flags)
327{
328 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
329 *tx_flags |= TX_CMD_FLG_RTS_MSK;
330 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
331 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
332
333 if (!ieee80211_is_mgmt(fc))
334 return;
335
336 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
337 case cpu_to_le16(IEEE80211_STYPE_AUTH):
338 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
339 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
340 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
341 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
342 *tx_flags |= TX_CMD_FLG_CTS_MSK;
343 break;
344 }
345 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
346 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
347 *tx_flags |= TX_CMD_FLG_CTS_MSK;
348 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
349 }
350}
351EXPORT_SYMBOL(iwlcore_tx_cmd_protection);
352
353
354static bool iwl_is_channel_extension(struct iwl_priv *priv, 291static bool iwl_is_channel_extension(struct iwl_priv *priv,
355 enum ieee80211_band band, 292 enum ieee80211_band band,
356 u16 channel, u8 extension_chan_offset) 293 u16 channel, u8 extension_chan_offset)
@@ -1206,8 +1143,16 @@ EXPORT_SYMBOL(iwl_apm_init);
1206 1143
1207int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) 1144int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1208{ 1145{
1209 int ret = 0; 1146 int ret;
1210 s8 prev_tx_power = priv->tx_power_user_lmt; 1147 s8 prev_tx_power;
1148
1149 lockdep_assert_held(&priv->mutex);
1150
1151 if (priv->tx_power_user_lmt == tx_power && !force)
1152 return 0;
1153
1154 if (!priv->cfg->ops->lib->send_tx_power)
1155 return -EOPNOTSUPP;
1211 1156
1212 if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) { 1157 if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
1213 IWL_WARN(priv, 1158 IWL_WARN(priv,
@@ -1224,93 +1169,29 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1224 return -EINVAL; 1169 return -EINVAL;
1225 } 1170 }
1226 1171
1227 if (priv->tx_power_user_lmt != tx_power) 1172 if (!iwl_is_ready_rf(priv))
1228 force = true; 1173 return -EIO;
1229 1174
1230	/* if nic is not up don't send command */	 1175	/* after a scan completes, tx_power_next needs to be applied */
1231 if (iwl_is_ready_rf(priv)) { 1176 priv->tx_power_next = tx_power;
1232 priv->tx_power_user_lmt = tx_power; 1177 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
1233 if (force && priv->cfg->ops->lib->send_tx_power) 1178 IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n");
1234 ret = priv->cfg->ops->lib->send_tx_power(priv); 1179 return 0;
1235 else if (!priv->cfg->ops->lib->send_tx_power)
1236 ret = -EOPNOTSUPP;
1237 /*
1238 * if fail to set tx_power, restore the orig. tx power
1239 */
1240 if (ret)
1241 priv->tx_power_user_lmt = prev_tx_power;
1242 } 1180 }
1243 1181
1244 /* 1182 prev_tx_power = priv->tx_power_user_lmt;
1245 * Even this is an async host command, the command 1183 priv->tx_power_user_lmt = tx_power;
1246 * will always report success from uCode
1247 * So once driver can placing the command into the queue
1248 * successfully, driver can use priv->tx_power_user_lmt
1249 * to reflect the current tx power
1250 */
1251 return ret;
1252}
1253EXPORT_SYMBOL(iwl_set_tx_power);
1254
1255irqreturn_t iwl_isr_legacy(int irq, void *data)
1256{
1257 struct iwl_priv *priv = data;
1258 u32 inta, inta_mask;
1259 u32 inta_fh;
1260 unsigned long flags;
1261 if (!priv)
1262 return IRQ_NONE;
1263 1184
1264 spin_lock_irqsave(&priv->lock, flags); 1185 ret = priv->cfg->ops->lib->send_tx_power(priv);
1265 1186
1266 /* Disable (but don't clear!) interrupts here to avoid 1187 /* if fail to set tx_power, restore the orig. tx power */
1267 * back-to-back ISRs and sporadic interrupts from our NIC. 1188 if (ret) {
1268 * If we have something to service, the tasklet will re-enable ints. 1189 priv->tx_power_user_lmt = prev_tx_power;
1269 * If we *don't* have something, we'll re-enable before leaving here. */ 1190 priv->tx_power_next = prev_tx_power;
1270 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
1271 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1272
1273 /* Discover which interrupts are active/pending */
1274 inta = iwl_read32(priv, CSR_INT);
1275 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1276
1277 /* Ignore interrupt if there's nothing in NIC to service.
1278 * This may be due to IRQ shared with another device,
1279 * or due to sporadic interrupts thrown from our NIC. */
1280 if (!inta && !inta_fh) {
1281 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
1282 goto none;
1283 } 1191 }
1284 1192 return ret;
1285 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1286 /* Hardware disappeared. It might have already raised
1287 * an interrupt */
1288 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1289 goto unplugged;
1290 }
1291
1292 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1293 inta, inta_mask, inta_fh);
1294
1295 inta &= ~CSR_INT_BIT_SCD;
1296
1297 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1298 if (likely(inta || inta_fh))
1299 tasklet_schedule(&priv->irq_tasklet);
1300
1301 unplugged:
1302 spin_unlock_irqrestore(&priv->lock, flags);
1303 return IRQ_HANDLED;
1304
1305 none:
1306 /* re-enable interrupts here since we don't have anything to service. */
1307 /* only Re-enable if diabled by irq */
1308 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1309 iwl_enable_interrupts(priv);
1310 spin_unlock_irqrestore(&priv->lock, flags);
1311 return IRQ_NONE;
1312} 1193}
1313EXPORT_SYMBOL(iwl_isr_legacy); 1194EXPORT_SYMBOL(iwl_set_tx_power);
1314 1195
1315void iwl_send_bt_config(struct iwl_priv *priv) 1196void iwl_send_bt_config(struct iwl_priv *priv)
1316{ 1197{
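
The rewritten iwl_set_tx_power() above turns the old nested special cases into early returns: identical requests are skipped, a request made while a scan is running is only recorded in tx_power_next, and a failed host command rolls back both the current and the pending limit. A compact stand-alone model of that flow, using made-up types (model_priv, model_send_tx_power) rather than the driver's own:

#include <stdbool.h>
#include <stdio.h>

struct model_priv {
	int tx_power_user_lmt;	/* limit currently programmed */
	int tx_power_next;	/* limit to apply after the scan ends */
	bool scanning;
};

static int model_send_tx_power(struct model_priv *priv)
{
	printf("sending tx power %d to uCode\n", priv->tx_power_user_lmt);
	return 0;		/* pretend the host command succeeded */
}

static int model_set_tx_power(struct model_priv *priv, int tx_power, bool force)
{
	int prev = priv->tx_power_user_lmt;
	int ret;

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;			/* nothing to change */

	priv->tx_power_next = tx_power;		/* remembered across a scan */
	if (priv->scanning && !force)
		return 0;			/* deferred, not an error */

	priv->tx_power_user_lmt = tx_power;
	ret = model_send_tx_power(priv);
	if (ret) {				/* restore both copies on failure */
		priv->tx_power_user_lmt = prev;
		priv->tx_power_next = prev;
	}
	return ret;
}

int main(void)
{
	struct model_priv priv = { .tx_power_user_lmt = 15, .scanning = true };

	model_set_tx_power(&priv, 10, false);	/* deferred: scan in progress */
	priv.scanning = false;
	model_set_tx_power(&priv, 10, false);	/* now sent to the device */
	return 0;
}

The deferred value lives in the new tx_power_next field (added to struct iwl_priv further down in this patch), which the scan-completion path can then apply.
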
@@ -1452,318 +1333,51 @@ int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
1452} 1333}
1453EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon); 1334EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
1454 1335
1455static void iwl_ht_conf(struct iwl_priv *priv, 1336static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1456 struct ieee80211_vif *vif)
1457{ 1337{
1458 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 1338 iwl_connection_init_rx_config(priv, ctx);
1459 struct ieee80211_sta *sta;
1460 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1461 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1462
1463 IWL_DEBUG_MAC80211(priv, "enter:\n");
1464
1465 if (!ctx->ht.enabled)
1466 return;
1467
1468 ctx->ht.protection =
1469 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
1470 ctx->ht.non_gf_sta_present =
1471 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
1472
1473 ht_conf->single_chain_sufficient = false;
1474
1475 switch (vif->type) {
1476 case NL80211_IFTYPE_STATION:
1477 rcu_read_lock();
1478 sta = ieee80211_find_sta(vif, bss_conf->bssid);
1479 if (sta) {
1480 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1481 int maxstreams;
1482
1483 maxstreams = (ht_cap->mcs.tx_params &
1484 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
1485 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1486 maxstreams += 1;
1487
1488 if ((ht_cap->mcs.rx_mask[1] == 0) &&
1489 (ht_cap->mcs.rx_mask[2] == 0))
1490 ht_conf->single_chain_sufficient = true;
1491 if (maxstreams <= 1)
1492 ht_conf->single_chain_sufficient = true;
1493 } else {
1494 /*
1495 * If at all, this can only happen through a race
1496 * when the AP disconnects us while we're still
1497 * setting up the connection, in that case mac80211
1498 * will soon tell us about that.
1499 */
1500 ht_conf->single_chain_sufficient = true;
1501 }
1502 rcu_read_unlock();
1503 break;
1504 case NL80211_IFTYPE_ADHOC:
1505 ht_conf->single_chain_sufficient = true;
1506 break;
1507 default:
1508 break;
1509 }
1510
1511 IWL_DEBUG_MAC80211(priv, "leave\n");
1512}
1513 1339
1514static inline void iwl_set_no_assoc(struct iwl_priv *priv, 1340 if (priv->cfg->ops->hcmd->set_rxon_chain)
1515 struct ieee80211_vif *vif) 1341 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1516{
1517 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1518 1342
1519 iwl_led_disassociate(priv); 1343 return iwlcore_commit_rxon(priv, ctx);
1520 /*
1521 * inform the ucode that there is no longer an
1522 * association and that no more packets should be
1523 * sent
1524 */
1525 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1526 ctx->staging.assoc_id = 0;
1527 iwlcore_commit_rxon(priv, ctx);
1528} 1344}
1529 1345
1530static void iwlcore_beacon_update(struct ieee80211_hw *hw, 1346static int iwl_setup_interface(struct iwl_priv *priv,
1531 struct ieee80211_vif *vif) 1347 struct iwl_rxon_context *ctx)
1532{ 1348{
1533 struct iwl_priv *priv = hw->priv; 1349 struct ieee80211_vif *vif = ctx->vif;
1534 unsigned long flags; 1350 int err;
1535 __le64 timestamp;
1536 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
1537
1538 if (!skb)
1539 return;
1540
1541 IWL_DEBUG_ASSOC(priv, "enter\n");
1542 1351
1543 lockdep_assert_held(&priv->mutex); 1352 lockdep_assert_held(&priv->mutex);
1544 1353
1545 if (!priv->beacon_ctx) {
1546 IWL_ERR(priv, "update beacon but no beacon context!\n");
1547 dev_kfree_skb(skb);
1548 return;
1549 }
1550
1551 spin_lock_irqsave(&priv->lock, flags);
1552
1553 if (priv->beacon_skb)
1554 dev_kfree_skb(priv->beacon_skb);
1555
1556 priv->beacon_skb = skb;
1557
1558 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
1559 priv->timestamp = le64_to_cpu(timestamp);
1560
1561 IWL_DEBUG_ASSOC(priv, "leave\n");
1562
1563 spin_unlock_irqrestore(&priv->lock, flags);
1564
1565 if (!iwl_is_ready_rf(priv)) {
1566 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1567 return;
1568 }
1569
1570 priv->cfg->ops->lib->post_associate(priv, priv->beacon_ctx->vif);
1571}
1572
1573void iwl_bss_info_changed(struct ieee80211_hw *hw,
1574 struct ieee80211_vif *vif,
1575 struct ieee80211_bss_conf *bss_conf,
1576 u32 changes)
1577{
1578 struct iwl_priv *priv = hw->priv;
1579 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1580 int ret;
1581
1582 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
1583
1584 if (!iwl_is_alive(priv))
1585 return;
1586
1587 mutex_lock(&priv->mutex);
1588
1589 if (changes & BSS_CHANGED_QOS) {
1590 unsigned long flags;
1591
1592 spin_lock_irqsave(&priv->lock, flags);
1593 ctx->qos_data.qos_active = bss_conf->qos;
1594 iwl_update_qos(priv, ctx);
1595 spin_unlock_irqrestore(&priv->lock, flags);
1596 }
1597
1598 if (changes & BSS_CHANGED_BEACON_ENABLED) {
1599 /*
1600 * the add_interface code must make sure we only ever
1601 * have a single interface that could be beaconing at
1602 * any time.
1603 */
1604 if (vif->bss_conf.enable_beacon)
1605 priv->beacon_ctx = ctx;
1606 else
1607 priv->beacon_ctx = NULL;
1608 }
1609
1610 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
1611 dev_kfree_skb(priv->beacon_skb);
1612 priv->beacon_skb = ieee80211_beacon_get(hw, vif);
1613 }
1614
1615 if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
1616 iwl_send_rxon_timing(priv, ctx);
1617
1618 if (changes & BSS_CHANGED_BSSID) {
1619 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
1620
1621 /*
1622 * If there is currently a HW scan going on in the
1623 * background then we need to cancel it else the RXON
1624 * below/in post_associate will fail.
1625 */
1626 if (iwl_scan_cancel_timeout(priv, 100)) {
1627 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
1628 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
1629 mutex_unlock(&priv->mutex);
1630 return;
1631 }
1632
1633 /* mac80211 only sets assoc when in STATION mode */
1634 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
1635 memcpy(ctx->staging.bssid_addr,
1636 bss_conf->bssid, ETH_ALEN);
1637
1638 /* currently needed in a few places */
1639 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
1640 } else {
1641 ctx->staging.filter_flags &=
1642 ~RXON_FILTER_ASSOC_MSK;
1643 }
1644
1645 }
1646
1647 /* 1354 /*
1648 * This needs to be after setting the BSSID in case 1355 * This variable will be correct only when there's just
1649 * mac80211 decides to do both changes at once because 1356 * a single context, but all code using it is for hardware
1650 * it will invoke post_associate. 1357 * that supports only one context.
1651 */ 1358 */
1652 if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON) 1359 priv->iw_mode = vif->type;
1653 iwlcore_beacon_update(hw, vif);
1654
1655 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
1656 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
1657 bss_conf->use_short_preamble);
1658 if (bss_conf->use_short_preamble)
1659 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
1660 else
1661 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
1662 }
1663
1664 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
1665 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
1666 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
1667 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
1668 else
1669 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
1670 if (bss_conf->use_cts_prot)
1671 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1672 else
1673 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
1674 }
1675
1676 if (changes & BSS_CHANGED_BASIC_RATES) {
1677 /* XXX use this information
1678 *
1679 * To do that, remove code from iwl_set_rate() and put something
1680 * like this here:
1681 *
1682 if (A-band)
1683 ctx->staging.ofdm_basic_rates =
1684 bss_conf->basic_rates;
1685 else
1686 ctx->staging.ofdm_basic_rates =
1687 bss_conf->basic_rates >> 4;
1688 ctx->staging.cck_basic_rates =
1689 bss_conf->basic_rates & 0xF;
1690 */
1691 }
1692
1693 if (changes & BSS_CHANGED_HT) {
1694 iwl_ht_conf(priv, vif);
1695
1696 if (priv->cfg->ops->hcmd->set_rxon_chain)
1697 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1698 }
1699
1700 if (changes & BSS_CHANGED_ASSOC) {
1701 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
1702 if (bss_conf->assoc) {
1703 priv->timestamp = bss_conf->timestamp;
1704
1705 iwl_led_associate(priv);
1706
1707 if (!iwl_is_rfkill(priv))
1708 priv->cfg->ops->lib->post_associate(priv, vif);
1709 } else
1710 iwl_set_no_assoc(priv, vif);
1711 }
1712
1713 if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
1714 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
1715 changes);
1716 ret = iwl_send_rxon_assoc(priv, ctx);
1717 if (!ret) {
1718 /* Sync active_rxon with latest change. */
1719 memcpy((void *)&ctx->active,
1720 &ctx->staging,
1721 sizeof(struct iwl_rxon_cmd));
1722 }
1723 }
1724 1360
1725 if (changes & BSS_CHANGED_BEACON_ENABLED) { 1361 ctx->is_active = true;
1726 if (vif->bss_conf.enable_beacon) {
1727 memcpy(ctx->staging.bssid_addr,
1728 bss_conf->bssid, ETH_ALEN);
1729 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
1730 iwl_led_associate(priv);
1731 iwlcore_config_ap(priv, vif);
1732 } else
1733 iwl_set_no_assoc(priv, vif);
1734 }
1735 1362
1736 if (changes & BSS_CHANGED_IBSS) { 1363 err = iwl_set_mode(priv, ctx);
1737 ret = priv->cfg->ops->lib->manage_ibss_station(priv, vif, 1364 if (err) {
1738 bss_conf->ibss_joined); 1365 if (!ctx->always_active)
1739 if (ret) 1366 ctx->is_active = false;
1740 IWL_ERR(priv, "failed to %s IBSS station %pM\n", 1367 return err;
1741 bss_conf->ibss_joined ? "add" : "remove",
1742 bss_conf->bssid);
1743 } 1368 }
1744 1369
1745 if (changes & BSS_CHANGED_IDLE && 1370 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
1746 priv->cfg->ops->hcmd->set_pan_params) { 1371 vif->type == NL80211_IFTYPE_ADHOC) {
1747 if (priv->cfg->ops->hcmd->set_pan_params(priv)) 1372 /*
1748 IWL_ERR(priv, "failed to update PAN params\n"); 1373 * pretend to have high BT traffic as long as we
1374 * are operating in IBSS mode, as this will cause
1375 * the rate scaling etc. to behave as intended.
1376 */
1377 priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1749 } 1378 }
1750 1379
1751 mutex_unlock(&priv->mutex); 1380 return 0;
1752
1753 IWL_DEBUG_MAC80211(priv, "leave\n");
1754}
1755EXPORT_SYMBOL(iwl_bss_info_changed);
1756
1757static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
1758{
1759 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1760
1761 iwl_connection_init_rx_config(priv, ctx);
1762
1763 if (priv->cfg->ops->hcmd->set_rxon_chain)
1764 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1765
1766 return iwlcore_commit_rxon(priv, ctx);
1767} 1381}
1768 1382
1769int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 1383int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
@@ -1771,7 +1385,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1771 struct iwl_priv *priv = hw->priv; 1385 struct iwl_priv *priv = hw->priv;
1772 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1386 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1773 struct iwl_rxon_context *tmp, *ctx = NULL; 1387 struct iwl_rxon_context *tmp, *ctx = NULL;
1774 int err = 0; 1388 int err;
1775 1389
1776 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", 1390 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1777 vif->type, vif->addr); 1391 vif->type, vif->addr);
@@ -1813,36 +1427,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1813 1427
1814 vif_priv->ctx = ctx; 1428 vif_priv->ctx = ctx;
1815 ctx->vif = vif; 1429 ctx->vif = vif;
1816 /*
1817 * This variable will be correct only when there's just
1818 * a single context, but all code using it is for hardware
1819 * that supports only one context.
1820 */
1821 priv->iw_mode = vif->type;
1822
1823 ctx->is_active = true;
1824
1825 err = iwl_set_mode(priv, vif);
1826 if (err) {
1827 if (!ctx->always_active)
1828 ctx->is_active = false;
1829 goto out_err;
1830 }
1831
1832 if (priv->cfg->bt_params &&
1833 priv->cfg->bt_params->advanced_bt_coexist &&
1834 vif->type == NL80211_IFTYPE_ADHOC) {
1835 /*
1836 * pretend to have high BT traffic as long as we
1837 * are operating in IBSS mode, as this will cause
1838 * the rate scaling etc. to behave as intended.
1839 */
1840 priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1841 }
1842 1430
1843 goto out; 1431 err = iwl_setup_interface(priv, ctx);
1432 if (!err)
1433 goto out;
1844 1434
1845 out_err:
1846 ctx->vif = NULL; 1435 ctx->vif = NULL;
1847 priv->iw_mode = NL80211_IFTYPE_STATION; 1436 priv->iw_mode = NL80211_IFTYPE_STATION;
1848 out: 1437 out:
@@ -1853,27 +1442,24 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1853} 1442}
1854EXPORT_SYMBOL(iwl_mac_add_interface); 1443EXPORT_SYMBOL(iwl_mac_add_interface);
1855 1444
1856void iwl_mac_remove_interface(struct ieee80211_hw *hw, 1445static void iwl_teardown_interface(struct iwl_priv *priv,
1857 struct ieee80211_vif *vif) 1446 struct ieee80211_vif *vif,
1447 bool mode_change)
1858{ 1448{
1859 struct iwl_priv *priv = hw->priv;
1860 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1449 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1861 1450
1862 IWL_DEBUG_MAC80211(priv, "enter\n"); 1451 lockdep_assert_held(&priv->mutex);
1863
1864 mutex_lock(&priv->mutex);
1865
1866 WARN_ON(ctx->vif != vif);
1867 ctx->vif = NULL;
1868 1452
1869 if (priv->scan_vif == vif) { 1453 if (priv->scan_vif == vif) {
1870 iwl_scan_cancel_timeout(priv, 200); 1454 iwl_scan_cancel_timeout(priv, 200);
1871 iwl_force_scan_end(priv); 1455 iwl_force_scan_end(priv);
1872 } 1456 }
1873 iwl_set_mode(priv, vif);
1874 1457
1875 if (!ctx->always_active) 1458 if (!mode_change) {
1876 ctx->is_active = false; 1459 iwl_set_mode(priv, ctx);
1460 if (!ctx->always_active)
1461 ctx->is_active = false;
1462 }
1877 1463
1878 /* 1464 /*
1879 * When removing the IBSS interface, overwrite the 1465 * When removing the IBSS interface, overwrite the
@@ -1883,211 +1469,31 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1883 * both values are the same and zero. 1469 * both values are the same and zero.
1884 */ 1470 */
1885 if (vif->type == NL80211_IFTYPE_ADHOC) 1471 if (vif->type == NL80211_IFTYPE_ADHOC)
1886 priv->bt_traffic_load = priv->notif_bt_traffic_load; 1472 priv->bt_traffic_load = priv->last_bt_traffic_load;
1887
1888 memset(priv->bssid, 0, ETH_ALEN);
1889 mutex_unlock(&priv->mutex);
1890
1891 IWL_DEBUG_MAC80211(priv, "leave\n");
1892
1893} 1473}
1894EXPORT_SYMBOL(iwl_mac_remove_interface);
1895
1896/**
1897 * iwl_mac_config - mac80211 config callback
1898 */
1899int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
1900{
1901 struct iwl_priv *priv = hw->priv;
1902 const struct iwl_channel_info *ch_info;
1903 struct ieee80211_conf *conf = &hw->conf;
1904 struct ieee80211_channel *channel = conf->channel;
1905 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
1906 struct iwl_rxon_context *ctx;
1907 unsigned long flags = 0;
1908 int ret = 0;
1909 u16 ch;
1910 int scan_active = 0;
1911
1912 mutex_lock(&priv->mutex);
1913
1914 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
1915 channel->hw_value, changed);
1916
1917 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
1918 test_bit(STATUS_SCANNING, &priv->status))) {
1919 scan_active = 1;
1920 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
1921 }
1922
1923 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
1924 IEEE80211_CONF_CHANGE_CHANNEL)) {
1925 /* mac80211 uses static for non-HT which is what we want */
1926 priv->current_ht_config.smps = conf->smps_mode;
1927
1928 /*
1929 * Recalculate chain counts.
1930 *
1931 * If monitor mode is enabled then mac80211 will
1932 * set up the SM PS mode to OFF if an HT channel is
1933 * configured.
1934 */
1935 if (priv->cfg->ops->hcmd->set_rxon_chain)
1936 for_each_context(priv, ctx)
1937 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1938 }
1939
1940 /* during scanning mac80211 will delay channel setting until
1941 * scan finish with changed = 0
1942 */
1943 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
1944 if (scan_active)
1945 goto set_ch_out;
1946
1947 ch = channel->hw_value;
1948 ch_info = iwl_get_channel_info(priv, channel->band, ch);
1949 if (!is_channel_valid(ch_info)) {
1950 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
1951 ret = -EINVAL;
1952 goto set_ch_out;
1953 }
1954 1474
1955 spin_lock_irqsave(&priv->lock, flags); 1475void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1956 1476 struct ieee80211_vif *vif)
1957 for_each_context(priv, ctx) {
1958 /* Configure HT40 channels */
1959 ctx->ht.enabled = conf_is_ht(conf);
1960 if (ctx->ht.enabled) {
1961 if (conf_is_ht40_minus(conf)) {
1962 ctx->ht.extension_chan_offset =
1963 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1964 ctx->ht.is_40mhz = true;
1965 } else if (conf_is_ht40_plus(conf)) {
1966 ctx->ht.extension_chan_offset =
1967 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1968 ctx->ht.is_40mhz = true;
1969 } else {
1970 ctx->ht.extension_chan_offset =
1971 IEEE80211_HT_PARAM_CHA_SEC_NONE;
1972 ctx->ht.is_40mhz = false;
1973 }
1974 } else
1975 ctx->ht.is_40mhz = false;
1976
1977 /*
1978 * Default to no protection. Protection mode will
1979 * later be set from BSS config in iwl_ht_conf
1980 */
1981 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
1982
1983 /* if we are switching from ht to 2.4 clear flags
1984 * from any ht related info since 2.4 does not
1985 * support ht */
1986 if ((le16_to_cpu(ctx->staging.channel) != ch))
1987 ctx->staging.flags = 0;
1988
1989 iwl_set_rxon_channel(priv, channel, ctx);
1990 iwl_set_rxon_ht(priv, ht_conf);
1991
1992 iwl_set_flags_for_band(priv, ctx, channel->band,
1993 ctx->vif);
1994 }
1995
1996 spin_unlock_irqrestore(&priv->lock, flags);
1997
1998 if (priv->cfg->ops->lib->update_bcast_stations)
1999 ret = priv->cfg->ops->lib->update_bcast_stations(priv);
2000
2001 set_ch_out:
2002 /* The list of supported rates and rate mask can be different
2003 * for each band; since the band may have changed, reset
2004 * the rate mask to what mac80211 lists */
2005 iwl_set_rate(priv);
2006 }
2007
2008 if (changed & (IEEE80211_CONF_CHANGE_PS |
2009 IEEE80211_CONF_CHANGE_IDLE)) {
2010 ret = iwl_power_update_mode(priv, false);
2011 if (ret)
2012 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
2013 }
2014
2015 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2016 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2017 priv->tx_power_user_lmt, conf->power_level);
2018
2019 iwl_set_tx_power(priv, conf->power_level, false);
2020 }
2021
2022 if (!iwl_is_ready(priv)) {
2023 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2024 goto out;
2025 }
2026
2027 if (scan_active)
2028 goto out;
2029
2030 for_each_context(priv, ctx) {
2031 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
2032 iwlcore_commit_rxon(priv, ctx);
2033 else
2034 IWL_DEBUG_INFO(priv,
2035 "Not re-sending same RXON configuration.\n");
2036 }
2037
2038out:
2039 IWL_DEBUG_MAC80211(priv, "leave\n");
2040 mutex_unlock(&priv->mutex);
2041 return ret;
2042}
2043EXPORT_SYMBOL(iwl_mac_config);
2044
2045void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2046{ 1477{
2047 struct iwl_priv *priv = hw->priv; 1478 struct iwl_priv *priv = hw->priv;
2048 unsigned long flags; 1479 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
2049 /* IBSS can only be the IWL_RXON_CTX_BSS context */
2050 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2051 1480
2052 mutex_lock(&priv->mutex);
2053 IWL_DEBUG_MAC80211(priv, "enter\n"); 1481 IWL_DEBUG_MAC80211(priv, "enter\n");
2054 1482
2055 spin_lock_irqsave(&priv->lock, flags); 1483 mutex_lock(&priv->mutex);
2056 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2057 spin_unlock_irqrestore(&priv->lock, flags);
2058
2059 spin_lock_irqsave(&priv->lock, flags);
2060
2061 /* new association get rid of ibss beacon skb */
2062 if (priv->beacon_skb)
2063 dev_kfree_skb(priv->beacon_skb);
2064
2065 priv->beacon_skb = NULL;
2066
2067 priv->timestamp = 0;
2068
2069 spin_unlock_irqrestore(&priv->lock, flags);
2070
2071 iwl_scan_cancel_timeout(priv, 100);
2072 if (!iwl_is_ready_rf(priv)) {
2073 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2074 mutex_unlock(&priv->mutex);
2075 return;
2076 }
2077 1484
2078 /* we are restarting association process 1485 WARN_ON(ctx->vif != vif);
2079 * clear RXON_FILTER_ASSOC_MSK bit 1486 ctx->vif = NULL;
2080 */
2081 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2082 iwlcore_commit_rxon(priv, ctx);
2083 1487
2084 iwl_set_rate(priv); 1488 iwl_teardown_interface(priv, vif, false);
2085 1489
1490 memset(priv->bssid, 0, ETH_ALEN);
2086 mutex_unlock(&priv->mutex); 1491 mutex_unlock(&priv->mutex);
2087 1492
2088 IWL_DEBUG_MAC80211(priv, "leave\n"); 1493 IWL_DEBUG_MAC80211(priv, "leave\n");
1494
2089} 1495}
2090EXPORT_SYMBOL(iwl_mac_reset_tsf); 1496EXPORT_SYMBOL(iwl_mac_remove_interface);
2091 1497
2092int iwl_alloc_txq_mem(struct iwl_priv *priv) 1498int iwl_alloc_txq_mem(struct iwl_priv *priv)
2093{ 1499{
@@ -2431,6 +1837,63 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
2431 return 0; 1837 return 0;
2432} 1838}
2433 1839
1840int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1841 enum nl80211_iftype newtype, bool newp2p)
1842{
1843 struct iwl_priv *priv = hw->priv;
1844 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1845 struct iwl_rxon_context *tmp;
1846 u32 interface_modes;
1847 int err;
1848
1849 newtype = ieee80211_iftype_p2p(newtype, newp2p);
1850
1851 mutex_lock(&priv->mutex);
1852
1853 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1854
1855 if (!(interface_modes & BIT(newtype))) {
1856 err = -EBUSY;
1857 goto out;
1858 }
1859
1860 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1861 for_each_context(priv, tmp) {
1862 if (ctx == tmp)
1863 continue;
1864
1865 if (!tmp->vif)
1866 continue;
1867
1868 /*
1869 * The current mode switch would be exclusive, but
1870 * another context is active ... refuse the switch.
1871 */
1872 err = -EBUSY;
1873 goto out;
1874 }
1875 }
1876
1877 /* success */
1878 iwl_teardown_interface(priv, vif, true);
1879 vif->type = newtype;
1880 err = iwl_setup_interface(priv, ctx);
1881 WARN_ON(err);
1882 /*
1883 * We've switched internally, but submitting to the
1884 * device may have failed for some reason. Mask this
1885 * error, because otherwise mac80211 will not switch
1886 * (and set the interface type back) and we'll be
1887 * out of sync with it.
1888 */
1889 err = 0;
1890
1891 out:
1892 mutex_unlock(&priv->mutex);
1893 return err;
1894}
1895EXPORT_SYMBOL(iwl_mac_change_interface);
1896
2434/** 1897/**
2435 * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover 1898 * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover
2436 * 1899 *
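
iwl_mac_change_interface() gates the switch on two bitmasks kept per RXON context: the new type must appear in interface_modes or exclusive_interface_modes, and an exclusive type is refused while any other context still holds a vif. A toy model of just that check, with stand-in context structs and made-up type numbers:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

struct toy_ctx {
	unsigned int interface_modes;
	unsigned int exclusive_interface_modes;
	bool has_vif;
};

static int toy_can_switch(struct toy_ctx *ctx, struct toy_ctx *contexts,
			  int n_contexts, unsigned int newtype)
{
	unsigned int modes = ctx->interface_modes |
			     ctx->exclusive_interface_modes;
	int i;

	if (!(modes & BIT(newtype)))
		return -EBUSY;			/* type not usable on this context */

	if (ctx->exclusive_interface_modes & BIT(newtype))
		for (i = 0; i < n_contexts; i++)
			if (&contexts[i] != ctx && contexts[i].has_vif)
				return -EBUSY;	/* switch would not be exclusive */

	return 0;
}

int main(void)
{
	struct toy_ctx ctxs[2] = {
		{ .interface_modes = BIT(2), .exclusive_interface_modes = BIT(3) },
		{ .interface_modes = BIT(2), .has_vif = true },
	};

	/* prints -16 (EBUSY): type 3 is exclusive but the other context is up */
	printf("switch to type 3: %d\n", toy_can_switch(&ctxs[0], ctxs, 2, 3));
	return 0;
}
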
@@ -2584,8 +2047,9 @@ EXPORT_SYMBOL(iwl_add_beacon_time);
2584 2047
2585#ifdef CONFIG_PM 2048#ifdef CONFIG_PM
2586 2049
2587int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) 2050int iwl_pci_suspend(struct device *device)
2588{ 2051{
2052 struct pci_dev *pdev = to_pci_dev(device);
2589 struct iwl_priv *priv = pci_get_drvdata(pdev); 2053 struct iwl_priv *priv = pci_get_drvdata(pdev);
2590 2054
2591 /* 2055 /*
@@ -2597,18 +2061,14 @@ int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2597 */ 2061 */
2598 iwl_apm_stop(priv); 2062 iwl_apm_stop(priv);
2599 2063
2600 pci_save_state(pdev);
2601 pci_disable_device(pdev);
2602 pci_set_power_state(pdev, PCI_D3hot);
2603
2604 return 0; 2064 return 0;
2605} 2065}
2606EXPORT_SYMBOL(iwl_pci_suspend); 2066EXPORT_SYMBOL(iwl_pci_suspend);
2607 2067
2608int iwl_pci_resume(struct pci_dev *pdev) 2068int iwl_pci_resume(struct device *device)
2609{ 2069{
2070 struct pci_dev *pdev = to_pci_dev(device);
2610 struct iwl_priv *priv = pci_get_drvdata(pdev); 2071 struct iwl_priv *priv = pci_get_drvdata(pdev);
2611 int ret;
2612 bool hw_rfkill = false; 2072 bool hw_rfkill = false;
2613 2073
2614 /* 2074 /*
@@ -2617,11 +2077,6 @@ int iwl_pci_resume(struct pci_dev *pdev)
2617 */ 2077 */
2618 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 2078 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2619 2079
2620 pci_set_power_state(pdev, PCI_D0);
2621 ret = pci_enable_device(pdev);
2622 if (ret)
2623 return ret;
2624 pci_restore_state(pdev);
2625 iwl_enable_interrupts(priv); 2080 iwl_enable_interrupts(priv);
2626 2081
2627 if (!(iwl_read32(priv, CSR_GP_CNTRL) & 2082 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
@@ -2639,4 +2094,14 @@ int iwl_pci_resume(struct pci_dev *pdev)
2639} 2094}
2640EXPORT_SYMBOL(iwl_pci_resume); 2095EXPORT_SYMBOL(iwl_pci_resume);
2641 2096
2097const struct dev_pm_ops iwl_pm_ops = {
2098 .suspend = iwl_pci_suspend,
2099 .resume = iwl_pci_resume,
2100 .freeze = iwl_pci_suspend,
2101 .thaw = iwl_pci_resume,
2102 .poweroff = iwl_pci_suspend,
2103 .restore = iwl_pci_resume,
2104};
2105EXPORT_SYMBOL(iwl_pm_ops);
2106
2642#endif /* CONFIG_PM */ 2107#endif /* CONFIG_PM */
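
With the PCI core now handling state save/restore and D-state transitions, the driver only exposes a const dev_pm_ops table (iwl_pm_ops) and attaches it through .driver.pm (the IWL_PM_OPS macro seen in the iwl-agn.c hunk near the top of this section). A minimal sketch of the same wiring for a hypothetical "foo" PCI driver; every name below is made up, only the structure mirrors the change:

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* quiesce the device; the PCI core saves state and enters D3hot */
	dev_info(&pdev->dev, "suspending\n");
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* the PCI core has already restored state and power; re-init here */
	dev_info(&pdev->dev, "resuming\n");
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
	.freeze		= foo_suspend,
	.thaw		= foo_resume,
	.poweroff	= foo_suspend,
	.restore	= foo_resume,
};

static struct pci_driver foo_driver = {
	.name		= "foo",
	/* .id_table, .probe, .remove elided; registered from module init */
	.driver.pm	= &foo_pm_ops,
};

Reusing the suspend/resume pair for freeze/thaw and poweroff/restore, as iwl_pm_ops does, keeps hibernation on the same code path as suspend-to-RAM.
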
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 64527def059..808be731ecb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -120,6 +120,14 @@ struct iwl_apm_ops {
120 void (*config)(struct iwl_priv *priv); 120 void (*config)(struct iwl_priv *priv);
121}; 121};
122 122
123struct iwl_isr_ops {
124 irqreturn_t (*isr) (int irq, void *data);
125 void (*free)(struct iwl_priv *priv);
126 int (*alloc)(struct iwl_priv *priv);
127 int (*reset)(struct iwl_priv *priv);
128 void (*disable)(struct iwl_priv *priv);
129};
130
123struct iwl_debugfs_ops { 131struct iwl_debugfs_ops {
124 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf, 132 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
125 size_t count, loff_t *ppos); 133 size_t count, loff_t *ppos);
@@ -193,20 +201,15 @@ struct iwl_lib_ops {
193 /* power */ 201 /* power */
194 int (*send_tx_power) (struct iwl_priv *priv); 202 int (*send_tx_power) (struct iwl_priv *priv);
195 void (*update_chain_flags)(struct iwl_priv *priv); 203 void (*update_chain_flags)(struct iwl_priv *priv);
196 void (*post_associate)(struct iwl_priv *priv, 204
197 struct ieee80211_vif *vif); 205 /* isr */
198 void (*config_ap)(struct iwl_priv *priv, struct ieee80211_vif *vif); 206 struct iwl_isr_ops isr_ops;
199 irqreturn_t (*isr) (int irq, void *data);
200 207
201 /* eeprom operations (as defined in iwl-eeprom.h) */ 208 /* eeprom operations (as defined in iwl-eeprom.h) */
202 struct iwl_eeprom_ops eeprom_ops; 209 struct iwl_eeprom_ops eeprom_ops;
203 210
204 /* temperature */ 211 /* temperature */
205 struct iwl_temp_ops temp_ops; 212 struct iwl_temp_ops temp_ops;
206 /* station management */
207 int (*manage_ibss_station)(struct iwl_priv *priv,
208 struct ieee80211_vif *vif, bool add);
209 int (*update_bcast_stations)(struct iwl_priv *priv);
210 /* recover from tx queue stall */ 213 /* recover from tx queue stall */
211 void (*recover_from_tx_stall)(unsigned long data); 214 void (*recover_from_tx_stall)(unsigned long data);
212 /* check for plcp health */ 215 /* check for plcp health */
@@ -235,12 +238,23 @@ struct iwl_nic_ops {
235 void (*additional_nic_config)(struct iwl_priv *priv); 238 void (*additional_nic_config)(struct iwl_priv *priv);
236}; 239};
237 240
241struct iwl_legacy_ops {
242 void (*post_associate)(struct iwl_priv *priv);
243 void (*config_ap)(struct iwl_priv *priv);
244 /* station management */
245 int (*update_bcast_stations)(struct iwl_priv *priv);
246 int (*manage_ibss_station)(struct iwl_priv *priv,
247 struct ieee80211_vif *vif, bool add);
248};
249
238struct iwl_ops { 250struct iwl_ops {
239 const struct iwl_lib_ops *lib; 251 const struct iwl_lib_ops *lib;
240 const struct iwl_hcmd_ops *hcmd; 252 const struct iwl_hcmd_ops *hcmd;
241 const struct iwl_hcmd_utils_ops *utils; 253 const struct iwl_hcmd_utils_ops *utils;
242 const struct iwl_led_ops *led; 254 const struct iwl_led_ops *led;
243 const struct iwl_nic_ops *nic; 255 const struct iwl_nic_ops *nic;
256 const struct iwl_legacy_ops *legacy;
257 const struct ieee80211_ops *ieee80211_ops;
244}; 258};
245 259
246struct iwl_mod_params { 260struct iwl_mod_params {
@@ -276,7 +290,10 @@ struct iwl_mod_params {
276 * sensitivity calibration operation 290 * sensitivity calibration operation
277 * @chain_noise_calib_by_driver: driver has the capability to perform 291 * @chain_noise_calib_by_driver: driver has the capability to perform
278 * chain noise calibration operation 292 * chain noise calibration operation
279*/ 293 * @shadow_reg_enable: HW shadow register bit
294 * @no_agg_framecnt_info: uCode does not provide aggregation frame count
295 * information
296 */
280struct iwl_base_params { 297struct iwl_base_params {
281 int eeprom_size; 298 int eeprom_size;
282 int num_of_queues; /* def: HW dependent */ 299 int num_of_queues; /* def: HW dependent */
@@ -306,6 +323,8 @@ struct iwl_base_params {
306 const bool ucode_tracing; 323 const bool ucode_tracing;
307 const bool sensitivity_calib_by_driver; 324 const bool sensitivity_calib_by_driver;
308 const bool chain_noise_calib_by_driver; 325 const bool chain_noise_calib_by_driver;
326 const bool shadow_reg_enable;
327 const bool no_agg_framecnt_info;
309}; 328};
310/* 329/*
311 * @advanced_bt_coexist: support advanced bt coexist 330 * @advanced_bt_coexist: support advanced bt coexist
@@ -315,6 +334,7 @@ struct iwl_base_params {
315 * @agg_time_limit: maximum number of uSec in aggregation 334 * @agg_time_limit: maximum number of uSec in aggregation
316 * @ampdu_factor: Maximum A-MPDU length factor 335 * @ampdu_factor: Maximum A-MPDU length factor
317 * @ampdu_density: Minimum A-MPDU spacing 336 * @ampdu_density: Minimum A-MPDU spacing
337 * @bt_sco_disable: uCode should not respond to BT in SCO/eSCO mode
318*/ 338*/
319struct iwl_bt_params { 339struct iwl_bt_params {
320 bool advanced_bt_coexist; 340 bool advanced_bt_coexist;
@@ -324,6 +344,7 @@ struct iwl_bt_params {
324 u16 agg_time_limit; 344 u16 agg_time_limit;
325 u8 ampdu_factor; 345 u8 ampdu_factor;
326 u8 ampdu_density; 346 u8 ampdu_density;
347 bool bt_sco_disable;
327}; 348};
328/* 349/*
329 * @use_rts_for_aggregation: use rts/cts protection for HT traffic 350 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
@@ -344,6 +365,8 @@ struct iwl_ht_params {
344 * @need_dc_calib: need to perform init dc calibration 365 * @need_dc_calib: need to perform init dc calibration
345 * @need_temp_offset_calib: need to perform temperature offset calibration 366 * @need_temp_offset_calib: need to perform temperature offset calibration
346 * @scan_antennas: available antenna for scan operation 367 * @scan_antennas: available antenna for scan operation
368 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
369 * @adv_pm: advance power management
347 * 370 *
348 * We enable the driver to be backward compatible wrt API version. The 371 * We enable the driver to be backward compatible wrt API version. The
349 * driver specifies which APIs it supports (with @ucode_api_max being the 372 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -390,14 +413,15 @@ struct iwl_cfg {
390 const bool need_temp_offset_calib; /* if used set to true */ 413 const bool need_temp_offset_calib; /* if used set to true */
391 u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; 414 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
392 u8 scan_tx_antennas[IEEE80211_NUM_BANDS]; 415 u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
416 enum iwl_led_mode led_mode;
417 const bool adv_pm;
393}; 418};
394 419
395/*************************** 420/***************************
396 * L i b * 421 * L i b *
397 ***************************/ 422 ***************************/
398 423
399struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 424struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg);
400 struct ieee80211_ops *hw_ops);
401int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, 425int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
402 const struct ieee80211_tx_queue_params *params); 426 const struct ieee80211_tx_queue_params *params);
403int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw); 427int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
@@ -425,23 +449,16 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
425 u32 decrypt_res, 449 u32 decrypt_res,
426 struct ieee80211_rx_status *stats); 450 struct ieee80211_rx_status *stats);
427void iwl_irq_handle_error(struct iwl_priv *priv); 451void iwl_irq_handle_error(struct iwl_priv *priv);
428void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
429void iwl_bss_info_changed(struct ieee80211_hw *hw,
430 struct ieee80211_vif *vif,
431 struct ieee80211_bss_conf *bss_conf,
432 u32 changes);
433int iwl_mac_add_interface(struct ieee80211_hw *hw, 452int iwl_mac_add_interface(struct ieee80211_hw *hw,
434 struct ieee80211_vif *vif); 453 struct ieee80211_vif *vif);
435void iwl_mac_remove_interface(struct ieee80211_hw *hw, 454void iwl_mac_remove_interface(struct ieee80211_hw *hw,
436 struct ieee80211_vif *vif); 455 struct ieee80211_vif *vif);
437int iwl_mac_config(struct ieee80211_hw *hw, u32 changed); 456int iwl_mac_change_interface(struct ieee80211_hw *hw,
438void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif); 457 struct ieee80211_vif *vif,
439void iwl_mac_reset_tsf(struct ieee80211_hw *hw); 458 enum nl80211_iftype newtype, bool newp2p);
440int iwl_alloc_txq_mem(struct iwl_priv *priv); 459int iwl_alloc_txq_mem(struct iwl_priv *priv);
441void iwl_free_txq_mem(struct iwl_priv *priv); 460void iwl_free_txq_mem(struct iwl_priv *priv);
442void iwlcore_tx_cmd_protection(struct iwl_priv *priv, 461
443 struct ieee80211_tx_info *info,
444 __le16 fc, __le32 *tx_flags);
445#ifdef CONFIG_IWLWIFI_DEBUGFS 462#ifdef CONFIG_IWLWIFI_DEBUGFS
446int iwl_alloc_traffic_mem(struct iwl_priv *priv); 463int iwl_alloc_traffic_mem(struct iwl_priv *priv);
447void iwl_free_traffic_mem(struct iwl_priv *priv); 464void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -598,7 +615,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
598/***************************************************** 615/*****************************************************
599 * PCI * 616 * PCI *
600 *****************************************************/ 617 *****************************************************/
601irqreturn_t iwl_isr_legacy(int irq, void *data);
602 618
603static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv) 619static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
604{ 620{
@@ -615,9 +631,17 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
615 u32 addon, u32 beacon_interval); 631 u32 addon, u32 beacon_interval);
616 632
617#ifdef CONFIG_PM 633#ifdef CONFIG_PM
618int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state); 634int iwl_pci_suspend(struct device *device);
619int iwl_pci_resume(struct pci_dev *pdev); 635int iwl_pci_resume(struct device *device);
620#endif /* CONFIG_PM */ 636extern const struct dev_pm_ops iwl_pm_ops;
637
638#define IWL_PM_OPS (&iwl_pm_ops)
639
640#else /* !CONFIG_PM */
641
642#define IWL_PM_OPS NULL
643
644#endif /* !CONFIG_PM */
621 645
622/***************************************************** 646/*****************************************************
623* Error Handling Debugging 647* Error Handling Debugging
@@ -724,11 +748,6 @@ static inline int iwlcore_commit_rxon(struct iwl_priv *priv,
724{ 748{
725 return priv->cfg->ops->hcmd->commit_rxon(priv, ctx); 749 return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
726} 750}
727static inline void iwlcore_config_ap(struct iwl_priv *priv,
728 struct ieee80211_vif *vif)
729{
730 priv->cfg->ops->lib->config_ap(priv, vif);
731}
732static inline const struct ieee80211_supported_band *iwl_get_hw_mode( 751static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
733 struct iwl_priv *priv, enum ieee80211_band band) 752 struct iwl_priv *priv, enum ieee80211_band band)
734{ 753{
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 2aa15ab1389..b80bf7dff55 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -132,6 +132,8 @@
132 132
133#define CSR_LED_REG (CSR_BASE+0x094) 133#define CSR_LED_REG (CSR_BASE+0x094)
134#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) 134#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
135#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */
136
135 137
136/* GIO Chicken Bits (PCI Express bus link power management) */ 138/* GIO Chicken Bits (PCI Express bus link power management) */
137#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) 139#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 8fdd4efdb1d..3cc58420d44 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -992,11 +992,8 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
992 " swq_id=%#.2x (ac %d/hwq %d)\n", 992 " swq_id=%#.2x (ac %d/hwq %d)\n",
993 cnt, q->read_ptr, q->write_ptr, 993 cnt, q->read_ptr, q->write_ptr,
994 !!test_bit(cnt, priv->queue_stopped), 994 !!test_bit(cnt, priv->queue_stopped),
995 txq->swq_id, 995 txq->swq_id, txq->swq_id & 3,
996 txq->swq_id & 0x80 ? txq->swq_id & 3 : 996 (txq->swq_id >> 2) & 0x1f);
997 txq->swq_id,
998 txq->swq_id & 0x80 ? (txq->swq_id >> 2) &
999 0x1f : txq->swq_id);
1000 if (cnt >= 4) 997 if (cnt >= 4)
1001 continue; 998 continue;
1002 /* for the ACs, display the stop count too */ 999 /* for the ACs, display the stop count too */
@@ -1580,7 +1577,7 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
1580 priv->bt_full_concurrent ? "full concurrency" : "3-wire"); 1577 priv->bt_full_concurrent ? "full concurrency" : "3-wire");
1581 pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, " 1578 pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, "
1582 "last traffic notif: %d\n", 1579 "last traffic notif: %d\n",
1583 priv->bt_status ? "On" : "Off", priv->notif_bt_traffic_load); 1580 priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
1584 pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, " 1581 pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
1585 "sco_active: %d, kill_ack_mask: %x, " 1582 "sco_active: %d, kill_ack_mask: %x, "
1586 "kill_cts_mask: %x\n", 1583 "kill_cts_mask: %x\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 70e07fa4840..ea81ced1375 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1162,6 +1162,8 @@ struct iwl_rxon_context {
1162 */ 1162 */
1163 bool always_active, is_active; 1163 bool always_active, is_active;
1164 1164
1165 bool ht_need_multiple_chains;
1166
1165 enum iwl_rxon_context_id ctxid; 1167 enum iwl_rxon_context_id ctxid;
1166 1168
1167 u32 interface_modes, exclusive_interface_modes; 1169 u32 interface_modes, exclusive_interface_modes;
@@ -1469,7 +1471,7 @@ struct iwl_priv {
1469 1471
1470 /* bt coex */ 1472 /* bt coex */
1471 u8 bt_status; 1473 u8 bt_status;
1472 u8 bt_traffic_load, notif_bt_traffic_load; 1474 u8 bt_traffic_load, last_bt_traffic_load;
1473 bool bt_ch_announce; 1475 bool bt_ch_announce;
1474 bool bt_sco_active; 1476 bool bt_sco_active;
1475 bool bt_full_concurrent; 1477 bool bt_full_concurrent;
@@ -1480,7 +1482,6 @@ struct iwl_priv {
1480 u16 bt_on_thresh; 1482 u16 bt_on_thresh;
1481 u16 bt_duration; 1483 u16 bt_duration;
1482 u16 dynamic_frag_thresh; 1484 u16 dynamic_frag_thresh;
1483 u16 dynamic_agg_thresh;
1484 u8 bt_ci_compliance; 1485 u8 bt_ci_compliance;
1485 struct work_struct bt_traffic_change_work; 1486 struct work_struct bt_traffic_change_work;
1486 1487
@@ -1517,6 +1518,7 @@ struct iwl_priv {
1517 s8 tx_power_user_lmt; 1518 s8 tx_power_user_lmt;
1518 s8 tx_power_device_lmt; 1519 s8 tx_power_device_lmt;
1519 s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */ 1520 s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
1521 s8 tx_power_next;
1520 1522
1521 1523
1522#ifdef CONFIG_IWLWIFI_DEBUG 1524#ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index d9b590625ae..e87be1e551a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -110,9 +110,18 @@ enum {
110}; 110};
111 111
112/* SKU Capabilities */ 112/* SKU Capabilities */
113/* 3945 only */
113#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0) 114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
114#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1) 115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
115 116
117/* 5000 and up */
118#define EEPROM_SKU_CAP_BAND_POS (4)
119#define EEPROM_SKU_CAP_BAND_SELECTION \
120 (3 << EEPROM_SKU_CAP_BAND_POS)
121#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
122#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
123#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
124
116/* *regulatory* channel data format in eeprom, one for each channel. 125/* *regulatory* channel data format in eeprom, one for each channel.
117 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */ 126 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
118struct iwl_eeprom_channel { 127struct iwl_eeprom_channel {
@@ -397,7 +406,7 @@ struct iwl_eeprom_calib_info {
397#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ 406#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
398#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ 407#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
399#define EEPROM_VERSION (2*0x44) /* 2 bytes */ 408#define EEPROM_VERSION (2*0x44) /* 2 bytes */
400#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */ 409#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
401#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ 410#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
402#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ 411#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
403#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ 412#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
@@ -504,6 +513,7 @@ struct iwl_eeprom_ops {
504int iwl_eeprom_init(struct iwl_priv *priv); 513int iwl_eeprom_init(struct iwl_priv *priv);
505void iwl_eeprom_free(struct iwl_priv *priv); 514void iwl_eeprom_free(struct iwl_priv *priv);
506int iwl_eeprom_check_version(struct iwl_priv *priv); 515int iwl_eeprom_check_version(struct iwl_priv *priv);
516int iwl_eeprom_check_sku(struct iwl_priv *priv);
507const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset); 517const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
508int iwlcore_eeprom_verify_signature(struct iwl_priv *priv); 518int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
509u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset); 519u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
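Note: the new capability bits all live in a single 16-bit SKU word, which is why EEPROM_SKU_CAP above grows from 1 to 2 bytes. A minimal sketch of decoding that word with the added masks (the helper name and the print are illustrative only, not the body of the new iwl_eeprom_check_sku(); assumes kernel headers for u16/pr_info):

	/* illustrative decode of the 16-bit SKU capability word (sketch) */
	static void sku_decode_example(u16 sku)
	{
		unsigned int band = (sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
				    EEPROM_SKU_CAP_BAND_POS;	/* 2-bit band code */

		pr_info("SKU: band %u 11n %d AMT %d PAN %d\n", band,
			!!(sku & EEPROM_SKU_CAP_11N_ENABLE),
			!!(sku & EEPROM_SKU_CAP_AMT_ENABLE),
			!!(sku & EEPROM_SKU_CAP_IPAN_ENABLE));
	}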
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 1aaef70deae..3f5bedd8875 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -44,15 +44,6 @@ static inline struct ieee80211_conf *ieee80211_get_hw_conf(
44 return &hw->conf; 44 return &hw->conf;
45} 45}
46 46
47static inline unsigned long elapsed_jiffies(unsigned long start,
48 unsigned long end)
49{
50 if (end >= start)
51 return end - start;
52
53 return end + (MAX_JIFFY_OFFSET - start) + 1;
54}
55
56/** 47/**
57 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning 48 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
58 * @index -- current index 49 * @index -- current index
@@ -104,42 +95,36 @@ static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
104 * | | | | | | | | 95 * | | | | | | | |
105 * | | | | | | +-+-------- AC queue (0-3) 96 * | | | | | | +-+-------- AC queue (0-3)
106 * | | | | | | 97 * | | | | | |
107 * | +-+-+-+-+------------ HW A-MPDU queue 98 * | +-+-+-+-+------------ HW queue ID
108 * | 99 * |
109 * +---------------------- indicates agg queue 100 * +---------------------- unused
110 */ 101 */
111static inline u8 iwl_virtual_agg_queue_num(u8 ac, u8 hwq) 102static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
112{ 103{
113 BUG_ON(ac > 3); /* only have 2 bits */ 104 BUG_ON(ac > 3); /* only have 2 bits */
114 BUG_ON(hwq > 31); /* only have 5 bits */ 105 BUG_ON(hwq > 31); /* only use 5 bits */
115 106
116 return 0x80 | (hwq << 2) | ac; 107 txq->swq_id = (hwq << 2) | ac;
117} 108}
118 109
119static inline void iwl_wake_queue(struct iwl_priv *priv, u8 queue) 110static inline void iwl_wake_queue(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq)
120{ 112{
121 u8 ac = queue; 113 u8 queue = txq->swq_id;
122 u8 hwq = queue; 114 u8 ac = queue & 3;
123 115 u8 hwq = (queue >> 2) & 0x1f;
124 if (queue & 0x80) {
125 ac = queue & 3;
126 hwq = (queue >> 2) & 0x1f;
127 }
128 116
129 if (test_and_clear_bit(hwq, priv->queue_stopped)) 117 if (test_and_clear_bit(hwq, priv->queue_stopped))
130 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0) 118 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
131 ieee80211_wake_queue(priv->hw, ac); 119 ieee80211_wake_queue(priv->hw, ac);
132} 120}
133 121
134static inline void iwl_stop_queue(struct iwl_priv *priv, u8 queue) 122static inline void iwl_stop_queue(struct iwl_priv *priv,
123 struct iwl_tx_queue *txq)
135{ 124{
136 u8 ac = queue; 125 u8 queue = txq->swq_id;
137 u8 hwq = queue; 126 u8 ac = queue & 3;
138 127 u8 hwq = (queue >> 2) & 0x1f;
139 if (queue & 0x80) {
140 ac = queue & 3;
141 hwq = (queue >> 2) & 0x1f;
142 }
143 128
144 if (!test_and_set_bit(hwq, priv->queue_stopped)) 129 if (!test_and_set_bit(hwq, priv->queue_stopped))
145 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0) 130 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
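The swq_id packing changes here: the low 2 bits carry the AC, the next 5 bits carry the HW queue, and the former 0x80 aggregation marker is gone, so decoding no longer needs the conditional. A standalone restatement of the round-trip (not driver code):

	#include <assert.h>

	static unsigned char pack_swq_id(unsigned char ac, unsigned char hwq)
	{
		return (hwq << 2) | ac;		/* ac in bits 0-1, hwq in bits 2-6 */
	}

	int main(void)
	{
		unsigned char id = pack_swq_id(2, 13);

		assert((id & 3) == 2);			/* AC recovered */
		assert(((id >> 2) & 0x1f) == 13);	/* HW queue recovered */
		return 0;
	}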
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 86c2b6fed0c..516e5577ed2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -45,9 +45,8 @@
45/* default: IWL_LED_BLINK(0) using blinking index table */ 45/* default: IWL_LED_BLINK(0) using blinking index table */
46static int led_mode; 46static int led_mode;
47module_param(led_mode, int, S_IRUGO); 47module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), " 48MODULE_PARM_DESC(led_mode, "led mode: 0=system default, "
49 "(default 0)"); 49 "1=On(RF On)/Off(RF Off), 2=blinking");
50
51 50
52static const struct { 51static const struct {
53 u16 tpt; /* Mb/s */ 52 u16 tpt; /* Mb/s */
@@ -128,12 +127,13 @@ EXPORT_SYMBOL(iwl_led_start);
128int iwl_led_associate(struct iwl_priv *priv) 127int iwl_led_associate(struct iwl_priv *priv)
129{ 128{
130 IWL_DEBUG_LED(priv, "Associated\n"); 129 IWL_DEBUG_LED(priv, "Associated\n");
131 if (led_mode == IWL_LED_BLINK) 130 if (priv->cfg->led_mode == IWL_LED_BLINK)
132 priv->allow_blinking = 1; 131 priv->allow_blinking = 1;
133 priv->last_blink_time = jiffies; 132 priv->last_blink_time = jiffies;
134 133
135 return 0; 134 return 0;
136} 135}
136EXPORT_SYMBOL(iwl_led_associate);
137 137
138int iwl_led_disassociate(struct iwl_priv *priv) 138int iwl_led_disassociate(struct iwl_priv *priv)
139{ 139{
@@ -141,6 +141,7 @@ int iwl_led_disassociate(struct iwl_priv *priv)
141 141
142 return 0; 142 return 0;
143} 143}
144EXPORT_SYMBOL(iwl_led_disassociate);
144 145
145/* 146/*
146 * calculate blink rate according to last second Tx/Rx activities 147 * calculate blink rate according to last second Tx/Rx activities
@@ -221,5 +222,8 @@ void iwl_leds_init(struct iwl_priv *priv)
221 priv->last_blink_rate = 0; 222 priv->last_blink_rate = 0;
222 priv->last_blink_time = 0; 223 priv->last_blink_time = 0;
223 priv->allow_blinking = 0; 224 priv->allow_blinking = 0;
225 if (led_mode != IWL_LED_DEFAULT &&
226 led_mode != priv->cfg->led_mode)
227 priv->cfg->led_mode = led_mode;
224} 228}
225EXPORT_SYMBOL(iwl_leds_init); 229EXPORT_SYMBOL(iwl_leds_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 49a70baa3fb..9079b33486e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -47,14 +47,16 @@ enum led_type {
47 47
48/* 48/*
49 * LED mode 49 * LED mode
50 * IWL_LED_BLINK: adjust led blink rate based on blink table 50 * IWL_LED_DEFAULT: use system default
51 * IWL_LED_RF_STATE: turn LED on/off based on RF state 51 * IWL_LED_RF_STATE: turn LED on/off based on RF state
52 * LED ON = RF ON 52 * LED ON = RF ON
53 * LED OFF = RF OFF 53 * LED OFF = RF OFF
54 * IWL_LED_BLINK: adjust led blink rate based on blink table
54 */ 55 */
55enum iwl_led_mode { 56enum iwl_led_mode {
56 IWL_LED_BLINK, 57 IWL_LED_DEFAULT,
57 IWL_LED_RF_STATE, 58 IWL_LED_RF_STATE,
59 IWL_LED_BLINK,
58}; 60};
59 61
60void iwl_leds_init(struct iwl_priv *priv); 62void iwl_leds_init(struct iwl_priv *priv);
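With the reordering, IWL_LED_DEFAULT (0) keeps the per-device default from priv->cfg->led_mode, and only a non-zero led_mode module parameter overrides it (see the iwl_leds_init() change above). A compact restatement of that override, purely as a sketch (no such function exists in the driver):

	/* effective LED mode after iwl_leds_init() (illustrative) */
	static enum iwl_led_mode effective_led_mode(int module_param,
						    enum iwl_led_mode cfg_default)
	{
		return module_param != IWL_LED_DEFAULT ? module_param : cfg_default;
	}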
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
new file mode 100644
index 00000000000..a08b4e56e6b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-legacy.c
@@ -0,0 +1,662 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-helpers.h"
35#include "iwl-legacy.h"
36
37static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
38{
39 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
40 return;
41
42 if (!ctx->is_active)
43 return;
44
45 ctx->qos_data.def_qos_parm.qos_flags = 0;
46
47 if (ctx->qos_data.qos_active)
48 ctx->qos_data.def_qos_parm.qos_flags |=
49 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
50
51 if (ctx->ht.enabled)
52 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
53
54 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
55 ctx->qos_data.qos_active,
56 ctx->qos_data.def_qos_parm.qos_flags);
57
58 iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
59 sizeof(struct iwl_qosparam_cmd),
60 &ctx->qos_data.def_qos_parm, NULL);
61}
62
63/**
64 * iwl_legacy_mac_config - mac80211 config callback
65 */
66int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
67{
68 struct iwl_priv *priv = hw->priv;
69 const struct iwl_channel_info *ch_info;
70 struct ieee80211_conf *conf = &hw->conf;
71 struct ieee80211_channel *channel = conf->channel;
72 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
73 struct iwl_rxon_context *ctx;
74 unsigned long flags = 0;
75 int ret = 0;
76 u16 ch;
77 int scan_active = 0;
78 bool ht_changed[NUM_IWL_RXON_CTX] = {};
79
80 if (WARN_ON(!priv->cfg->ops->legacy))
81 return -EOPNOTSUPP;
82
83 mutex_lock(&priv->mutex);
84
85 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
86 channel->hw_value, changed);
87
88 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
89 test_bit(STATUS_SCANNING, &priv->status))) {
90 scan_active = 1;
91 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
92 }
93
94 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
95 IEEE80211_CONF_CHANGE_CHANNEL)) {
96 /* mac80211 uses static for non-HT which is what we want */
97 priv->current_ht_config.smps = conf->smps_mode;
98
99 /*
100 * Recalculate chain counts.
101 *
102 * If monitor mode is enabled then mac80211 will
103 * set up the SM PS mode to OFF if an HT channel is
104 * configured.
105 */
106 if (priv->cfg->ops->hcmd->set_rxon_chain)
107 for_each_context(priv, ctx)
108 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
109 }
110
111 /* during scanning mac80211 will delay channel setting until
 112	 * the scan finishes, with changed = 0
113 */
114 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
115 if (scan_active)
116 goto set_ch_out;
117
118 ch = channel->hw_value;
119 ch_info = iwl_get_channel_info(priv, channel->band, ch);
120 if (!is_channel_valid(ch_info)) {
121 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
122 ret = -EINVAL;
123 goto set_ch_out;
124 }
125
126 spin_lock_irqsave(&priv->lock, flags);
127
128 for_each_context(priv, ctx) {
129 /* Configure HT40 channels */
130 if (ctx->ht.enabled != conf_is_ht(conf)) {
131 ctx->ht.enabled = conf_is_ht(conf);
132 ht_changed[ctx->ctxid] = true;
133 }
134 if (ctx->ht.enabled) {
135 if (conf_is_ht40_minus(conf)) {
136 ctx->ht.extension_chan_offset =
137 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
138 ctx->ht.is_40mhz = true;
139 } else if (conf_is_ht40_plus(conf)) {
140 ctx->ht.extension_chan_offset =
141 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
142 ctx->ht.is_40mhz = true;
143 } else {
144 ctx->ht.extension_chan_offset =
145 IEEE80211_HT_PARAM_CHA_SEC_NONE;
146 ctx->ht.is_40mhz = false;
147 }
148 } else
149 ctx->ht.is_40mhz = false;
150
151 /*
152 * Default to no protection. Protection mode will
153 * later be set from BSS config in iwl_ht_conf
154 */
155 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
156
157 /* if we are switching from ht to 2.4 clear flags
158 * from any ht related info since 2.4 does not
159 * support ht */
160 if ((le16_to_cpu(ctx->staging.channel) != ch))
161 ctx->staging.flags = 0;
162
163 iwl_set_rxon_channel(priv, channel, ctx);
164 iwl_set_rxon_ht(priv, ht_conf);
165
166 iwl_set_flags_for_band(priv, ctx, channel->band,
167 ctx->vif);
168 }
169
170 spin_unlock_irqrestore(&priv->lock, flags);
171
172 if (priv->cfg->ops->legacy->update_bcast_stations)
173 ret = priv->cfg->ops->legacy->update_bcast_stations(priv);
174
175 set_ch_out:
176 /* The list of supported rates and rate mask can be different
177 * for each band; since the band may have changed, reset
178 * the rate mask to what mac80211 lists */
179 iwl_set_rate(priv);
180 }
181
182 if (changed & (IEEE80211_CONF_CHANGE_PS |
183 IEEE80211_CONF_CHANGE_IDLE)) {
184 ret = iwl_power_update_mode(priv, false);
185 if (ret)
186 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
187 }
188
189 if (changed & IEEE80211_CONF_CHANGE_POWER) {
190 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
191 priv->tx_power_user_lmt, conf->power_level);
192
193 iwl_set_tx_power(priv, conf->power_level, false);
194 }
195
196 if (!iwl_is_ready(priv)) {
197 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
198 goto out;
199 }
200
201 if (scan_active)
202 goto out;
203
204 for_each_context(priv, ctx) {
205 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
206 iwlcore_commit_rxon(priv, ctx);
207 else
208 IWL_DEBUG_INFO(priv,
209 "Not re-sending same RXON configuration.\n");
210 if (ht_changed[ctx->ctxid])
211 iwl_update_qos(priv, ctx);
212 }
213
214out:
215 IWL_DEBUG_MAC80211(priv, "leave\n");
216 mutex_unlock(&priv->mutex);
217 return ret;
218}
219EXPORT_SYMBOL(iwl_legacy_mac_config);
220
221void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
222{
223 struct iwl_priv *priv = hw->priv;
224 unsigned long flags;
225 /* IBSS can only be the IWL_RXON_CTX_BSS context */
226 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
227
228 if (WARN_ON(!priv->cfg->ops->legacy))
229 return;
230
231 mutex_lock(&priv->mutex);
232 IWL_DEBUG_MAC80211(priv, "enter\n");
233
234 spin_lock_irqsave(&priv->lock, flags);
235 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
236 spin_unlock_irqrestore(&priv->lock, flags);
237
238 spin_lock_irqsave(&priv->lock, flags);
239
 240	/* new association: get rid of the IBSS beacon skb */
241 if (priv->beacon_skb)
242 dev_kfree_skb(priv->beacon_skb);
243
244 priv->beacon_skb = NULL;
245
246 priv->timestamp = 0;
247
248 spin_unlock_irqrestore(&priv->lock, flags);
249
250 iwl_scan_cancel_timeout(priv, 100);
251 if (!iwl_is_ready_rf(priv)) {
252 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
253 mutex_unlock(&priv->mutex);
254 return;
255 }
256
257 /* we are restarting association process
258 * clear RXON_FILTER_ASSOC_MSK bit
259 */
260 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
261 iwlcore_commit_rxon(priv, ctx);
262
263 iwl_set_rate(priv);
264
265 mutex_unlock(&priv->mutex);
266
267 IWL_DEBUG_MAC80211(priv, "leave\n");
268}
269EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
270
271static void iwl_ht_conf(struct iwl_priv *priv,
272 struct ieee80211_vif *vif)
273{
274 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
275 struct ieee80211_sta *sta;
276 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
277 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
278
279 IWL_DEBUG_ASSOC(priv, "enter:\n");
280
281 if (!ctx->ht.enabled)
282 return;
283
284 ctx->ht.protection =
285 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
286 ctx->ht.non_gf_sta_present =
287 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
288
289 ht_conf->single_chain_sufficient = false;
290
291 switch (vif->type) {
292 case NL80211_IFTYPE_STATION:
293 rcu_read_lock();
294 sta = ieee80211_find_sta(vif, bss_conf->bssid);
295 if (sta) {
296 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
297 int maxstreams;
298
299 maxstreams = (ht_cap->mcs.tx_params &
300 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
301 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
302 maxstreams += 1;
303
304 if ((ht_cap->mcs.rx_mask[1] == 0) &&
305 (ht_cap->mcs.rx_mask[2] == 0))
306 ht_conf->single_chain_sufficient = true;
307 if (maxstreams <= 1)
308 ht_conf->single_chain_sufficient = true;
309 } else {
310 /*
311 * If at all, this can only happen through a race
312 * when the AP disconnects us while we're still
313 * setting up the connection, in that case mac80211
314 * will soon tell us about that.
315 */
316 ht_conf->single_chain_sufficient = true;
317 }
318 rcu_read_unlock();
319 break;
320 case NL80211_IFTYPE_ADHOC:
321 ht_conf->single_chain_sufficient = true;
322 break;
323 default:
324 break;
325 }
326
327 IWL_DEBUG_ASSOC(priv, "leave\n");
328}
329
330static inline void iwl_set_no_assoc(struct iwl_priv *priv,
331 struct ieee80211_vif *vif)
332{
333 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
334
335 iwl_led_disassociate(priv);
336 /*
337 * inform the ucode that there is no longer an
338 * association and that no more packets should be
339 * sent
340 */
341 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
342 ctx->staging.assoc_id = 0;
343 iwlcore_commit_rxon(priv, ctx);
344}
345
346static void iwlcore_beacon_update(struct ieee80211_hw *hw,
347 struct ieee80211_vif *vif)
348{
349 struct iwl_priv *priv = hw->priv;
350 unsigned long flags;
351 __le64 timestamp;
352 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
353
354 if (!skb)
355 return;
356
357 IWL_DEBUG_MAC80211(priv, "enter\n");
358
359 lockdep_assert_held(&priv->mutex);
360
361 if (!priv->beacon_ctx) {
362 IWL_ERR(priv, "update beacon but no beacon context!\n");
363 dev_kfree_skb(skb);
364 return;
365 }
366
367 spin_lock_irqsave(&priv->lock, flags);
368
369 if (priv->beacon_skb)
370 dev_kfree_skb(priv->beacon_skb);
371
372 priv->beacon_skb = skb;
373
374 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
375 priv->timestamp = le64_to_cpu(timestamp);
376
377 IWL_DEBUG_MAC80211(priv, "leave\n");
378 spin_unlock_irqrestore(&priv->lock, flags);
379
380 if (!iwl_is_ready_rf(priv)) {
381 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
382 return;
383 }
384
385 priv->cfg->ops->legacy->post_associate(priv);
386}
387
388void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
389 struct ieee80211_vif *vif,
390 struct ieee80211_bss_conf *bss_conf,
391 u32 changes)
392{
393 struct iwl_priv *priv = hw->priv;
394 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
395 int ret;
396
397 if (WARN_ON(!priv->cfg->ops->legacy))
398 return;
399
400 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
401
402 if (!iwl_is_alive(priv))
403 return;
404
405 mutex_lock(&priv->mutex);
406
407 if (changes & BSS_CHANGED_QOS) {
408 unsigned long flags;
409
410 spin_lock_irqsave(&priv->lock, flags);
411 ctx->qos_data.qos_active = bss_conf->qos;
412 iwl_update_qos(priv, ctx);
413 spin_unlock_irqrestore(&priv->lock, flags);
414 }
415
416 if (changes & BSS_CHANGED_BEACON_ENABLED) {
417 /*
418 * the add_interface code must make sure we only ever
419 * have a single interface that could be beaconing at
420 * any time.
421 */
422 if (vif->bss_conf.enable_beacon)
423 priv->beacon_ctx = ctx;
424 else
425 priv->beacon_ctx = NULL;
426 }
427
428 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
429 dev_kfree_skb(priv->beacon_skb);
430 priv->beacon_skb = ieee80211_beacon_get(hw, vif);
431 }
432
433 if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
434 iwl_send_rxon_timing(priv, ctx);
435
436 if (changes & BSS_CHANGED_BSSID) {
437 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
438
439 /*
440 * If there is currently a HW scan going on in the
441 * background then we need to cancel it else the RXON
442 * below/in post_associate will fail.
443 */
444 if (iwl_scan_cancel_timeout(priv, 100)) {
445 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
446 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
447 mutex_unlock(&priv->mutex);
448 return;
449 }
450
451 /* mac80211 only sets assoc when in STATION mode */
452 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
453 memcpy(ctx->staging.bssid_addr,
454 bss_conf->bssid, ETH_ALEN);
455
456 /* currently needed in a few places */
457 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
458 } else {
459 ctx->staging.filter_flags &=
460 ~RXON_FILTER_ASSOC_MSK;
461 }
462
463 }
464
465 /*
466 * This needs to be after setting the BSSID in case
467 * mac80211 decides to do both changes at once because
468 * it will invoke post_associate.
469 */
470 if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
471 iwlcore_beacon_update(hw, vif);
472
473 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
474 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
475 bss_conf->use_short_preamble);
476 if (bss_conf->use_short_preamble)
477 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
478 else
479 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
480 }
481
482 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
483 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
484 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
485 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
486 else
487 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
488 if (bss_conf->use_cts_prot)
489 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
490 else
491 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
492 }
493
494 if (changes & BSS_CHANGED_BASIC_RATES) {
495 /* XXX use this information
496 *
497 * To do that, remove code from iwl_set_rate() and put something
498 * like this here:
499 *
500 if (A-band)
501 ctx->staging.ofdm_basic_rates =
502 bss_conf->basic_rates;
503 else
504 ctx->staging.ofdm_basic_rates =
505 bss_conf->basic_rates >> 4;
506 ctx->staging.cck_basic_rates =
507 bss_conf->basic_rates & 0xF;
508 */
509 }
510
511 if (changes & BSS_CHANGED_HT) {
512 iwl_ht_conf(priv, vif);
513
514 if (priv->cfg->ops->hcmd->set_rxon_chain)
515 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
516 }
517
518 if (changes & BSS_CHANGED_ASSOC) {
519 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
520 if (bss_conf->assoc) {
521 priv->timestamp = bss_conf->timestamp;
522
523 iwl_led_associate(priv);
524
525 if (!iwl_is_rfkill(priv))
526 priv->cfg->ops->legacy->post_associate(priv);
527 } else
528 iwl_set_no_assoc(priv, vif);
529 }
530
531 if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
532 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
533 changes);
534 ret = iwl_send_rxon_assoc(priv, ctx);
535 if (!ret) {
536 /* Sync active_rxon with latest change. */
537 memcpy((void *)&ctx->active,
538 &ctx->staging,
539 sizeof(struct iwl_rxon_cmd));
540 }
541 }
542
543 if (changes & BSS_CHANGED_BEACON_ENABLED) {
544 if (vif->bss_conf.enable_beacon) {
545 memcpy(ctx->staging.bssid_addr,
546 bss_conf->bssid, ETH_ALEN);
547 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
548 iwl_led_associate(priv);
549 priv->cfg->ops->legacy->config_ap(priv);
550 } else
551 iwl_set_no_assoc(priv, vif);
552 }
553
554 if (changes & BSS_CHANGED_IBSS) {
555 ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
556 bss_conf->ibss_joined);
557 if (ret)
558 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
559 bss_conf->ibss_joined ? "add" : "remove",
560 bss_conf->bssid);
561 }
562
563 mutex_unlock(&priv->mutex);
564
565 IWL_DEBUG_MAC80211(priv, "leave\n");
566}
567EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
568
569irqreturn_t iwl_isr_legacy(int irq, void *data)
570{
571 struct iwl_priv *priv = data;
572 u32 inta, inta_mask;
573 u32 inta_fh;
574 unsigned long flags;
575 if (!priv)
576 return IRQ_NONE;
577
578 spin_lock_irqsave(&priv->lock, flags);
579
580 /* Disable (but don't clear!) interrupts here to avoid
581 * back-to-back ISRs and sporadic interrupts from our NIC.
582 * If we have something to service, the tasklet will re-enable ints.
583 * If we *don't* have something, we'll re-enable before leaving here. */
584 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
585 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
586
587 /* Discover which interrupts are active/pending */
588 inta = iwl_read32(priv, CSR_INT);
589 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
590
591 /* Ignore interrupt if there's nothing in NIC to service.
592 * This may be due to IRQ shared with another device,
593 * or due to sporadic interrupts thrown from our NIC. */
594 if (!inta && !inta_fh) {
595 IWL_DEBUG_ISR(priv,
596 "Ignore interrupt, inta == 0, inta_fh == 0\n");
597 goto none;
598 }
599
600 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
601 /* Hardware disappeared. It might have already raised
602 * an interrupt */
603 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
604 goto unplugged;
605 }
606
607 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
608 inta, inta_mask, inta_fh);
609
610 inta &= ~CSR_INT_BIT_SCD;
611
612 /* iwl_irq_tasklet() will service interrupts and re-enable them */
613 if (likely(inta || inta_fh))
614 tasklet_schedule(&priv->irq_tasklet);
615
616unplugged:
617 spin_unlock_irqrestore(&priv->lock, flags);
618 return IRQ_HANDLED;
619
620none:
621 /* re-enable interrupts here since we don't have anything to service. */
 622	/* only re-enable if disabled by irq */
623 if (test_bit(STATUS_INT_ENABLED, &priv->status))
624 iwl_enable_interrupts(priv);
625 spin_unlock_irqrestore(&priv->lock, flags);
626 return IRQ_NONE;
627}
628EXPORT_SYMBOL(iwl_isr_legacy);
629
630/*
631 * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
632 * function.
633 */
634void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
635 struct ieee80211_tx_info *info,
636 __le16 fc, __le32 *tx_flags)
637{
638 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
639 *tx_flags |= TX_CMD_FLG_RTS_MSK;
640 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
641 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
642
643 if (!ieee80211_is_mgmt(fc))
644 return;
645
646 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
647 case cpu_to_le16(IEEE80211_STYPE_AUTH):
648 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
649 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
650 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
651 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
652 *tx_flags |= TX_CMD_FLG_CTS_MSK;
653 break;
654 }
655 } else if (info->control.rates[0].flags &
656 IEEE80211_TX_RC_USE_CTS_PROTECT) {
657 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
658 *tx_flags |= TX_CMD_FLG_CTS_MSK;
659 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
660 }
661}
662EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
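The protection selection above reduces to a small rule: an RTS/CTS request yields RTS protection except for AUTH/DEAUTH/(RE)ASSOC-REQ management frames, which fall back to CTS-to-self, while a CTS-protect request always yields CTS-to-self. A standalone restatement (names are illustrative, not driver API):

	enum prot { PROT_NONE, PROT_RTS, PROT_CTS_SELF };

	static enum prot pick_protection(int want_rts_cts, int want_cts_to_self,
					 int is_auth_or_assoc_req_mgmt)
	{
		/* the real code also sets TX_CMD_FLG_FULL_TXOP_PROT_MSK in both branches */
		if (want_rts_cts)
			return is_auth_or_assoc_req_mgmt ? PROT_CTS_SELF : PROT_RTS;
		if (want_cts_to_self)
			return PROT_CTS_SELF;
		return PROT_NONE;
	}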
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.h b/drivers/net/wireless/iwlwifi/iwl-legacy.h
new file mode 100644
index 00000000000..9f7b2f93596
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-legacy.h
@@ -0,0 +1,79 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_h__
64#define __iwl_legacy_h__
65
66/* mac80211 handlers */
67int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
68void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
69void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
70 struct ieee80211_vif *vif,
71 struct ieee80211_bss_conf *bss_conf,
72 u32 changes);
73void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
74 struct ieee80211_tx_info *info,
75 __le16 fc, __le32 *tx_flags);
76
77irqreturn_t iwl_isr_legacy(int irq, void *data);
78
79#endif /* __iwl_legacy_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 49d7788937a..1eec18d909d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -75,6 +75,10 @@ struct iwl_power_vec_entry {
75 75
76#define NOSLP cpu_to_le16(0), 0, 0 76#define NOSLP cpu_to_le16(0), 0, 0
77#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0 77#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
78#define ASLP (IWL_POWER_POWER_SAVE_ENA_MSK | \
79 IWL_POWER_POWER_MANAGEMENT_ENA_MSK | \
80 IWL_POWER_ADVANCE_PM_ENA_MSK)
81#define ASLP_TOUT(T) cpu_to_le32(T)
78#define TU_TO_USEC 1024 82#define TU_TO_USEC 1024
79#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC) 83#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
80#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \ 84#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
@@ -114,6 +118,52 @@ static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
114 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} 118 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
115}; 119};
116 120
 121/* advanced power management */
122/* DTIM 0 - 2 */
123static const struct iwl_power_vec_entry apm_range_0[IWL_POWER_NUM] = {
124 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
125 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
126 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
127 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
128 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
129 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
130 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
131 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
132 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
133 SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
134};
135
136
137/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
138/* DTIM 3 - 10 */
139static const struct iwl_power_vec_entry apm_range_1[IWL_POWER_NUM] = {
140 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
141 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
142 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
143 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
144 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
145 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
146 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
147 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
148 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
149 SLP_VEC(1, 2, 6, 8, 0xFF), 0}, 2}
150};
151
152/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
153/* DTIM 11 - */
154static const struct iwl_power_vec_entry apm_range_2[IWL_POWER_NUM] = {
155 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
156 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
157 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
158 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
159 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
160 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
161 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
162 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
163 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
164 SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
165};
166
117static void iwl_static_sleep_cmd(struct iwl_priv *priv, 167static void iwl_static_sleep_cmd(struct iwl_priv *priv,
118 struct iwl_powertable_cmd *cmd, 168 struct iwl_powertable_cmd *cmd,
119 enum iwl_power_level lvl, int period) 169 enum iwl_power_level lvl, int period)
@@ -124,11 +174,19 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
124 u8 skip; 174 u8 skip;
125 u32 slp_itrvl; 175 u32 slp_itrvl;
126 176
127 table = range_2; 177 if (priv->cfg->adv_pm) {
128 if (period <= IWL_DTIM_RANGE_1_MAX) 178 table = apm_range_2;
129 table = range_1; 179 if (period <= IWL_DTIM_RANGE_1_MAX)
130 if (period <= IWL_DTIM_RANGE_0_MAX) 180 table = apm_range_1;
131 table = range_0; 181 if (period <= IWL_DTIM_RANGE_0_MAX)
182 table = apm_range_0;
183 } else {
184 table = range_2;
185 if (period <= IWL_DTIM_RANGE_1_MAX)
186 table = range_1;
187 if (period <= IWL_DTIM_RANGE_0_MAX)
188 table = range_0;
189 }
132 190
133 BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM); 191 BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);
134 192
@@ -163,6 +221,20 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
163 else 221 else
164 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; 222 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
165 223
224 if (priv->cfg->base_params->shadow_reg_enable)
225 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
226 else
227 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
228
229 if (priv->cfg->bt_params &&
230 priv->cfg->bt_params->advanced_bt_coexist) {
231 if (!priv->cfg->bt_params->bt_sco_disable)
232 cmd->flags |= IWL_POWER_BT_SCO_ENA;
233 else
234 cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
235 }
236
237
166 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); 238 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
167 if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL) 239 if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
168 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = 240 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
@@ -236,6 +308,19 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
236 if (priv->power_data.pci_pm) 308 if (priv->power_data.pci_pm)
237 cmd->flags |= IWL_POWER_PCI_PM_MSK; 309 cmd->flags |= IWL_POWER_PCI_PM_MSK;
238 310
311 if (priv->cfg->base_params->shadow_reg_enable)
312 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
313 else
314 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
315
316 if (priv->cfg->bt_params &&
317 priv->cfg->bt_params->advanced_bt_coexist) {
318 if (!priv->cfg->bt_params->bt_sco_disable)
319 cmd->flags |= IWL_POWER_BT_SCO_ENA;
320 else
321 cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
322 }
323
239 cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms); 324 cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
240 cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms); 325 cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);
241 326
@@ -263,70 +348,95 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
263 sizeof(struct iwl_powertable_cmd), cmd); 348 sizeof(struct iwl_powertable_cmd), cmd);
264} 349}
265 350
266/* priv->mutex must be held */ 351static void iwl_power_build_cmd(struct iwl_priv *priv,
267int iwl_power_update_mode(struct iwl_priv *priv, bool force) 352 struct iwl_powertable_cmd *cmd)
268{ 353{
269 int ret = 0;
270 bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS; 354 bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
271 bool update_chains;
272 struct iwl_powertable_cmd cmd;
273 int dtimper; 355 int dtimper;
274 356
275 /* Don't update the RX chain when chain noise calibration is running */
276 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
277 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
278
279 dtimper = priv->hw->conf.ps_dtim_period ?: 1; 357 dtimper = priv->hw->conf.ps_dtim_period ?: 1;
280 358
281 if (priv->cfg->base_params->broken_powersave) 359 if (priv->cfg->base_params->broken_powersave)
282 iwl_power_sleep_cam_cmd(priv, &cmd); 360 iwl_power_sleep_cam_cmd(priv, cmd);
283 else if (priv->cfg->base_params->supports_idle && 361 else if (priv->cfg->base_params->supports_idle &&
284 priv->hw->conf.flags & IEEE80211_CONF_IDLE) 362 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
285 iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20); 363 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
286 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection && 364 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
287 priv->cfg->ops->lib->tt_ops.tt_power_mode && 365 priv->cfg->ops->lib->tt_ops.tt_power_mode &&
288 priv->cfg->ops->lib->tt_ops.lower_power_detection(priv)) { 366 priv->cfg->ops->lib->tt_ops.lower_power_detection(priv)) {
289 /* in thermal throttling low power state */ 367 /* in thermal throttling low power state */
290 iwl_static_sleep_cmd(priv, &cmd, 368 iwl_static_sleep_cmd(priv, cmd,
291 priv->cfg->ops->lib->tt_ops.tt_power_mode(priv), dtimper); 369 priv->cfg->ops->lib->tt_ops.tt_power_mode(priv), dtimper);
292 } else if (!enabled) 370 } else if (!enabled)
293 iwl_power_sleep_cam_cmd(priv, &cmd); 371 iwl_power_sleep_cam_cmd(priv, cmd);
294 else if (priv->power_data.debug_sleep_level_override >= 0) 372 else if (priv->power_data.debug_sleep_level_override >= 0)
295 iwl_static_sleep_cmd(priv, &cmd, 373 iwl_static_sleep_cmd(priv, cmd,
296 priv->power_data.debug_sleep_level_override, 374 priv->power_data.debug_sleep_level_override,
297 dtimper); 375 dtimper);
298 else if (no_sleep_autoadjust) 376 else if (no_sleep_autoadjust)
299 iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_1, dtimper); 377 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_1, dtimper);
300 else 378 else
301 iwl_power_fill_sleep_cmd(priv, &cmd, 379 iwl_power_fill_sleep_cmd(priv, cmd,
302 priv->hw->conf.dynamic_ps_timeout, 380 priv->hw->conf.dynamic_ps_timeout,
303 priv->hw->conf.max_sleep_period); 381 priv->hw->conf.max_sleep_period);
382}
383
384int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
385 bool force)
386{
387 int ret;
388 bool update_chains;
389
390 lockdep_assert_held(&priv->mutex);
391
392 /* Don't update the RX chain when chain noise calibration is running */
393 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
394 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
395
396 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
397 return 0;
398
399 if (!iwl_is_ready_rf(priv))
400 return -EIO;
304 401
305 if (iwl_is_ready_rf(priv) && 402 /* scan complete use sleep_power_next, need to be updated */
306 (memcmp(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd)) || force)) { 403 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
307 if (cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) 404 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
308 set_bit(STATUS_POWER_PMI, &priv->status); 405 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
309 406 return 0;
310 ret = iwl_set_power(priv, &cmd); 407 }
311 if (!ret) { 408
312 if (!(cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) 409 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
313 clear_bit(STATUS_POWER_PMI, &priv->status); 410 set_bit(STATUS_POWER_PMI, &priv->status);
314 411
315 if (priv->cfg->ops->lib->update_chain_flags && 412 ret = iwl_set_power(priv, cmd);
316 update_chains) 413 if (!ret) {
317 priv->cfg->ops->lib->update_chain_flags(priv); 414 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
318 else if (priv->cfg->ops->lib->update_chain_flags) 415 clear_bit(STATUS_POWER_PMI, &priv->status);
319 IWL_DEBUG_POWER(priv, 416
417 if (priv->cfg->ops->lib->update_chain_flags && update_chains)
418 priv->cfg->ops->lib->update_chain_flags(priv);
419 else if (priv->cfg->ops->lib->update_chain_flags)
420 IWL_DEBUG_POWER(priv,
320 "Cannot update the power, chain noise " 421 "Cannot update the power, chain noise "
321 "calibration running: %d\n", 422 "calibration running: %d\n",
322 priv->chain_noise_data.state); 423 priv->chain_noise_data.state);
323 memcpy(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd)); 424
324 } else 425 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
325 IWL_ERR(priv, "set power fail, ret = %d", ret); 426 } else
326 } 427 IWL_ERR(priv, "set power fail, ret = %d", ret);
327 428
328 return ret; 429 return ret;
329} 430}
431EXPORT_SYMBOL(iwl_power_set_mode);
432
433int iwl_power_update_mode(struct iwl_priv *priv, bool force)
434{
435 struct iwl_powertable_cmd cmd;
436
437 iwl_power_build_cmd(priv, &cmd);
438 return iwl_power_set_mode(priv, &cmd, force);
439}
330EXPORT_SYMBOL(iwl_power_update_mode); 440EXPORT_SYMBOL(iwl_power_update_mode);
331 441
332/* initialize to default */ 442/* initialize to default */
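The iwl_power_build_cmd()/iwl_power_set_mode() split also adds a defer-and-replay path: while STATUS_SCANNING is set, the requested command is only copied into power_data.sleep_cmd_next, and the scan-completion path (see the iwl-scan.c hunk below) replays it. In caller terms, roughly (sketch only, priv->mutex held as before):

	/* while a scan is pending: the request is recorded, not sent */
	iwl_power_set_mode(priv, &cmd, false);

	/* on scan completion: replay the last deferred request */
	iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);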
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index df81565a7cc..fe012032c28 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -41,10 +41,13 @@ enum iwl_power_level {
41 41
42struct iwl_power_mgr { 42struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd; 43 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next;
44 int debug_sleep_level_override; 45 int debug_sleep_level_override;
45 bool pci_pm; 46 bool pci_pm;
46}; 47};
47 48
49int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
50 bool force);
48int iwl_power_update_mode(struct iwl_priv *priv, bool force); 51int iwl_power_update_mode(struct iwl_priv *priv, bool force);
49void iwl_power_initialize(struct iwl_priv *priv); 52void iwl_power_initialize(struct iwl_priv *priv);
50 53
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index f436270ca39..87a6fd84d4d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -134,28 +134,37 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
134 if (q->need_update == 0) 134 if (q->need_update == 0)
135 goto exit_unlock; 135 goto exit_unlock;
136 136
137 /* If power-saving is in use, make sure device is awake */ 137 if (priv->cfg->base_params->shadow_reg_enable) {
138 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 138 /* shadow register enabled */
139 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); 139 /* Device expects a multiple of 8 */
140 q->write_actual = (q->write & ~0x7);
141 iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
142 } else {
143 /* If power-saving is in use, make sure device is awake */
144 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
145 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
140 146
141 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 147 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
142 IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n", 148 IWL_DEBUG_INFO(priv,
143 reg); 149 "Rx queue requesting wakeup,"
144 iwl_set_bit(priv, CSR_GP_CNTRL, 150 " GP1 = 0x%x\n", reg);
145 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 151 iwl_set_bit(priv, CSR_GP_CNTRL,
146 goto exit_unlock; 152 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
147 } 153 goto exit_unlock;
154 }
148 155
149 q->write_actual = (q->write & ~0x7); 156 q->write_actual = (q->write & ~0x7);
150 iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual); 157 iwl_write_direct32(priv, rx_wrt_ptr_reg,
158 q->write_actual);
151 159
152 /* Else device is assumed to be awake */ 160 /* Else device is assumed to be awake */
153 } else { 161 } else {
154 /* Device expects a multiple of 8 */ 162 /* Device expects a multiple of 8 */
155 q->write_actual = (q->write & ~0x7); 163 q->write_actual = (q->write & ~0x7);
156 iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual); 164 iwl_write_direct32(priv, rx_wrt_ptr_reg,
165 q->write_actual);
166 }
157 } 167 }
158
159 q->need_update = 0; 168 q->need_update = 0;
160 169
161 exit_unlock: 170 exit_unlock:
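Both branches round the software write index down to a multiple of 8 with "& ~0x7", the granularity the device expects. A standalone check of that masking (not driver code):

	#include <assert.h>

	int main(void)
	{
		assert((13 & ~0x7) == 8);	/* rounds down to the previous multiple of 8 */
		assert((16 & ~0x7) == 16);	/* already-aligned values are unchanged */
		assert((7 & ~0x7) == 0);	/* values below 8 go to zero */
		return 0;
	}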
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 67da3129578..12d9363d0af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -252,8 +252,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
252 252
253 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n", 253 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
254 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", 254 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
255 jiffies_to_msecs(elapsed_jiffies 255 jiffies_to_msecs(jiffies - priv->scan_start));
256 (priv->scan_start, jiffies)));
257 256
258 queue_work(priv->workqueue, &priv->scan_completed); 257 queue_work(priv->workqueue, &priv->scan_completed);
259 258
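This open-coded subtraction replaces the removed elapsed_jiffies() helper (see the iwl-helpers.h hunk above); plain unsigned subtraction is already wrap-safe, so the extra branch was unnecessary. A standalone demonstration (not driver code):

	#include <assert.h>

	int main(void)
	{
		unsigned long start = (unsigned long)-5;	/* counter just before wrapping */
		unsigned long now = 3;				/* counter just after wrapping */

		assert(now - start == 8);	/* 8 ticks elapsed despite the wrap */
		return 0;
	}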
@@ -603,13 +602,16 @@ out_settings:
603 if (!iwl_is_ready_rf(priv)) 602 if (!iwl_is_ready_rf(priv))
604 goto out; 603 goto out;
605 604
606 /* Since setting the TXPOWER may have been deferred while 605 /*
607 * performing the scan, fire one off */ 606 * We do not commit power settings while scan is pending,
608 iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); 607 * do it now if the settings changed.
608 */
609 iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
610 iwl_set_tx_power(priv, priv->tx_power_next, false);
609 611
610 priv->cfg->ops->utils->post_scan(priv); 612 priv->cfg->ops->utils->post_scan(priv);
611 613
612 out: 614out:
613 mutex_unlock(&priv->mutex); 615 mutex_unlock(&priv->mutex);
614} 616}
615 617
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 7c7f7dcb1b1..0a67b2fa52a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -400,7 +400,8 @@ static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
400} 400}
401 401
402static int iwl_send_remove_station(struct iwl_priv *priv, 402static int iwl_send_remove_station(struct iwl_priv *priv,
403 const u8 *addr, int sta_id) 403 const u8 *addr, int sta_id,
404 bool temporary)
404{ 405{
405 struct iwl_rx_packet *pkt; 406 struct iwl_rx_packet *pkt;
406 int ret; 407 int ret;
@@ -436,9 +437,11 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
436 if (!ret) { 437 if (!ret) {
437 switch (pkt->u.rem_sta.status) { 438 switch (pkt->u.rem_sta.status) {
438 case REM_STA_SUCCESS_MSK: 439 case REM_STA_SUCCESS_MSK:
439 spin_lock_irqsave(&priv->sta_lock, flags_spin); 440 if (!temporary) {
440 iwl_sta_ucode_deactivate(priv, sta_id); 441 spin_lock_irqsave(&priv->sta_lock, flags_spin);
441 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 442 iwl_sta_ucode_deactivate(priv, sta_id);
443 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
444 }
442 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 445 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
443 break; 446 break;
444 default: 447 default:
@@ -505,7 +508,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
505 508
506 spin_unlock_irqrestore(&priv->sta_lock, flags); 509 spin_unlock_irqrestore(&priv->sta_lock, flags);
507 510
508 return iwl_send_remove_station(priv, addr, sta_id); 511 return iwl_send_remove_station(priv, addr, sta_id, false);
509out_err: 512out_err:
510 spin_unlock_irqrestore(&priv->sta_lock, flags); 513 spin_unlock_irqrestore(&priv->sta_lock, flags);
511 return -EINVAL; 514 return -EINVAL;
@@ -624,6 +627,44 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
624} 627}
625EXPORT_SYMBOL(iwl_restore_stations); 628EXPORT_SYMBOL(iwl_restore_stations);
626 629
630void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
631{
632 unsigned long flags;
633 int sta_id = ctx->ap_sta_id;
634 int ret;
635 struct iwl_addsta_cmd sta_cmd;
636 struct iwl_link_quality_cmd lq;
637 bool active;
638
639 spin_lock_irqsave(&priv->sta_lock, flags);
640 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
641 spin_unlock_irqrestore(&priv->sta_lock, flags);
642 return;
643 }
644
645 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
646 sta_cmd.mode = 0;
647 memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
648
649 active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
650 spin_unlock_irqrestore(&priv->sta_lock, flags);
651
652 if (active) {
653 ret = iwl_send_remove_station(
654 priv, priv->stations[sta_id].sta.sta.addr,
655 sta_id, true);
656 if (ret)
657 IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
658 priv->stations[sta_id].sta.sta.addr, ret);
659 }
660 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
661 if (ret)
662 IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
663 priv->stations[sta_id].sta.sta.addr, ret);
664 iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
665}
666EXPORT_SYMBOL(iwl_reprogram_ap_sta);
667
627int iwl_get_free_ucode_key_index(struct iwl_priv *priv) 668int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
628{ 669{
629 int i; 670 int i;
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 06475872eee..206f1e1a0ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -63,6 +63,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
63 63
64int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 64int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
65 struct iwl_link_quality_cmd *lq, u8 flags, bool init); 65 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
66void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
66 67
67/** 68/**
68 * iwl_clear_driver_stations - clear knowledge of all stations from driver 69 * iwl_clear_driver_stations - clear knowledge of all stations from driver
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 7261ee49f28..90659bcf580 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -49,30 +49,39 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
49 if (txq->need_update == 0) 49 if (txq->need_update == 0)
50 return; 50 return;
51 51
52 /* if we're trying to save power */ 52 if (priv->cfg->base_params->shadow_reg_enable) {
53 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 53 /* shadow register enabled */
54 /* wake up nic if it's powered down ...
55 * uCode will wake up, and interrupt us again, so next
56 * time we'll skip this part. */
57 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
58
59 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
60 IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
61 txq_id, reg);
62 iwl_set_bit(priv, CSR_GP_CNTRL,
63 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
64 return;
65 }
66
67 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
68 txq->q.write_ptr | (txq_id << 8));
69
70 /* else not in power-save mode, uCode will never sleep when we're
71 * trying to tx (during RFKILL, we're not trying to tx). */
72 } else
73 iwl_write32(priv, HBUS_TARG_WRPTR, 54 iwl_write32(priv, HBUS_TARG_WRPTR,
74 txq->q.write_ptr | (txq_id << 8)); 55 txq->q.write_ptr | (txq_id << 8));
56 } else {
57 /* if we're trying to save power */
58 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
59 /* wake up nic if it's powered down ...
60 * uCode will wake up, and interrupt us again, so next
61 * time we'll skip this part. */
62 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
63
64 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
65 IWL_DEBUG_INFO(priv,
66 "Tx queue %d requesting wakeup,"
67 " GP1 = 0x%x\n", txq_id, reg);
68 iwl_set_bit(priv, CSR_GP_CNTRL,
69 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
70 return;
71 }
72
73 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
74 txq->q.write_ptr | (txq_id << 8));
75 75
76 /*
77 * else not in power-save mode,
78 * uCode will never sleep when we're
79 * trying to tx (during RFKILL, we're not trying to tx).
80 */
81 } else
82 iwl_write32(priv, HBUS_TARG_WRPTR,
83 txq->q.write_ptr | (txq_id << 8));
84 }
76 txq->need_update = 0; 85 txq->need_update = 0;
77} 86}
78EXPORT_SYMBOL(iwl_txq_update_write_ptr); 87EXPORT_SYMBOL(iwl_txq_update_write_ptr);
@@ -350,13 +359,12 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
350 txq->need_update = 0; 359 txq->need_update = 0;
351 360
352 /* 361 /*
353 * Aggregation TX queues will get their ID when aggregation begins; 362 * For the default queues 0-3, set up the swq_id
354 * they overwrite the setting done here. The command FIFO doesn't 363 * already -- all others need to get one later
355 * need an swq_id so don't set one to catch errors, all others can 364 * (if they need one at all).
356 * be set up to the identity mapping.
357 */ 365 */
358 if (txq_id != priv->cmd_queue) 366 if (txq_id < 4)
359 txq->swq_id = txq_id; 367 iwl_set_swq_id(txq, txq_id, txq_id);
360 368
361 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 369 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
362 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 370 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 7edf8c2fb8c..8eb1393506b 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -61,6 +61,7 @@
61#include "iwl-helpers.h" 61#include "iwl-helpers.h"
62#include "iwl-dev.h" 62#include "iwl-dev.h"
63#include "iwl-spectrum.h" 63#include "iwl-spectrum.h"
64#include "iwl-legacy.h"
64 65
65/* 66/*
66 * module name, copyright, version, etc. 67 * module name, copyright, version, etc.
@@ -474,7 +475,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
474 dma_addr_t phys_addr; 475 dma_addr_t phys_addr;
475 dma_addr_t txcmd_phys; 476 dma_addr_t txcmd_phys;
476 int txq_id = skb_get_queue_mapping(skb); 477 int txq_id = skb_get_queue_mapping(skb);
477 u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */ 478 u16 len, idx, hdr_len;
478 u8 id; 479 u8 id;
479 u8 unicast; 480 u8 unicast;
480 u8 sta_id; 481 u8 sta_id;
@@ -611,15 +612,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
611 */ 612 */
612 len = sizeof(struct iwl3945_tx_cmd) + 613 len = sizeof(struct iwl3945_tx_cmd) +
613 sizeof(struct iwl_cmd_header) + hdr_len; 614 sizeof(struct iwl_cmd_header) + hdr_len;
614
615 len_org = len;
616 len = (len + 3) & ~3; 615 len = (len + 3) & ~3;
617 616
618 if (len_org != len)
619 len_org = 1;
620 else
621 len_org = 0;
622
623 /* Physical address of this Tx command's header (not MAC header!), 617 /* Physical address of this Tx command's header (not MAC header!),
624 * within command buffer array. */ 618 * within command buffer array. */
625 txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr, 619 txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
@@ -661,7 +655,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
661 spin_unlock_irqrestore(&priv->lock, flags); 655 spin_unlock_irqrestore(&priv->lock, flags);
662 } 656 }
663 657
664 iwl_stop_queue(priv, skb_get_queue_mapping(skb)); 658 iwl_stop_queue(priv, txq);
665 } 659 }
666 660
667 return 0; 661 return 0;
@@ -3057,22 +3051,22 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
3057 mutex_unlock(&priv->mutex); 3051 mutex_unlock(&priv->mutex);
3058} 3052}
3059 3053
3060void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif) 3054void iwl3945_post_associate(struct iwl_priv *priv)
3061{ 3055{
3062 int rc = 0; 3056 int rc = 0;
3063 struct ieee80211_conf *conf = NULL; 3057 struct ieee80211_conf *conf = NULL;
3064 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 3058 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3065 3059
3066 if (!vif || !priv->is_open) 3060 if (!ctx->vif || !priv->is_open)
3067 return; 3061 return;
3068 3062
3069 if (vif->type == NL80211_IFTYPE_AP) { 3063 if (ctx->vif->type == NL80211_IFTYPE_AP) {
3070 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); 3064 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
3071 return; 3065 return;
3072 } 3066 }
3073 3067
3074 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 3068 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3075 vif->bss_conf.aid, ctx->active.bssid_addr); 3069 ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
3076 3070
3077 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3071 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3078 return; 3072 return;
@@ -3091,18 +3085,18 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3091 3085
3092 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 3086 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3093 3087
3094 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); 3088 ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
3095 3089
3096 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", 3090 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3097 vif->bss_conf.aid, vif->bss_conf.beacon_int); 3091 ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
3098 3092
3099 if (vif->bss_conf.use_short_preamble) 3093 if (ctx->vif->bss_conf.use_short_preamble)
3100 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3094 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3101 else 3095 else
3102 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 3096 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3103 3097
3104 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { 3098 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3105 if (vif->bss_conf.use_short_slot) 3099 if (ctx->vif->bss_conf.use_short_slot)
3106 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 3100 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3107 else 3101 else
3108 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3102 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
@@ -3110,7 +3104,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3110 3104
3111 iwl3945_commit_rxon(priv, ctx); 3105 iwl3945_commit_rxon(priv, ctx);
3112 3106
3113 switch (vif->type) { 3107 switch (ctx->vif->type) {
3114 case NL80211_IFTYPE_STATION: 3108 case NL80211_IFTYPE_STATION:
3115 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); 3109 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
3116 break; 3110 break;
@@ -3119,7 +3113,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3119 break; 3113 break;
3120 default: 3114 default:
3121 IWL_ERR(priv, "%s Should not be called in %d mode\n", 3115 IWL_ERR(priv, "%s Should not be called in %d mode\n",
3122 __func__, vif->type); 3116 __func__, ctx->vif->type);
3123 break; 3117 break;
3124 } 3118 }
3125} 3119}
@@ -3234,9 +3228,10 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3234 return NETDEV_TX_OK; 3228 return NETDEV_TX_OK;
3235} 3229}
3236 3230
3237void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) 3231void iwl3945_config_ap(struct iwl_priv *priv)
3238{ 3232{
3239 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 3233 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3234 struct ieee80211_vif *vif = ctx->vif;
3240 int rc = 0; 3235 int rc = 0;
3241 3236
3242 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3237 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -3407,9 +3402,9 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3407 ctx->staging.filter_flags |= filter_or; 3402 ctx->staging.filter_flags |= filter_or;
3408 3403
3409 /* 3404 /*
3410 * Committing directly here breaks for some reason, 3405 * Not committing directly because hardware can perform a scan,
3411 * but we'll eventually commit the filter flags 3406 * but even if hw is ready, committing here breaks for some reason,
3412 * change anyway. 3407 * we'll eventually commit the filter flags change anyway.
3413 */ 3408 */
3414 3409
3415 mutex_unlock(&priv->mutex); 3410 mutex_unlock(&priv->mutex);
@@ -3824,18 +3819,19 @@ static struct attribute_group iwl3945_attribute_group = {
3824 .attrs = iwl3945_sysfs_entries, 3819 .attrs = iwl3945_sysfs_entries,
3825}; 3820};
3826 3821
3827static struct ieee80211_ops iwl3945_hw_ops = { 3822struct ieee80211_ops iwl3945_hw_ops = {
3828 .tx = iwl3945_mac_tx, 3823 .tx = iwl3945_mac_tx,
3829 .start = iwl3945_mac_start, 3824 .start = iwl3945_mac_start,
3830 .stop = iwl3945_mac_stop, 3825 .stop = iwl3945_mac_stop,
3831 .add_interface = iwl_mac_add_interface, 3826 .add_interface = iwl_mac_add_interface,
3832 .remove_interface = iwl_mac_remove_interface, 3827 .remove_interface = iwl_mac_remove_interface,
3833 .config = iwl_mac_config, 3828 .change_interface = iwl_mac_change_interface,
3829 .config = iwl_legacy_mac_config,
3834 .configure_filter = iwl3945_configure_filter, 3830 .configure_filter = iwl3945_configure_filter,
3835 .set_key = iwl3945_mac_set_key, 3831 .set_key = iwl3945_mac_set_key,
3836 .conf_tx = iwl_mac_conf_tx, 3832 .conf_tx = iwl_mac_conf_tx,
3837 .reset_tsf = iwl_mac_reset_tsf, 3833 .reset_tsf = iwl_legacy_mac_reset_tsf,
3838 .bss_info_changed = iwl_bss_info_changed, 3834 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
3839 .hw_scan = iwl_mac_hw_scan, 3835 .hw_scan = iwl_mac_hw_scan,
3840 .sta_add = iwl3945_mac_sta_add, 3836 .sta_add = iwl3945_mac_sta_add,
3841 .sta_remove = iwl_mac_sta_remove, 3837 .sta_remove = iwl_mac_sta_remove,
@@ -3866,6 +3862,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3866 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; 3862 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3867 3863
3868 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; 3864 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
3865 priv->tx_power_next = IWL_DEFAULT_TX_POWER;
3869 3866
3870 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { 3867 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
3871 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", 3868 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
@@ -3965,7 +3962,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
3965 3962
3966 /* mac80211 allocates memory for this device instance, including 3963 /* mac80211 allocates memory for this device instance, including
3967 * space for this driver's private structure */ 3964 * space for this driver's private structure */
3968 hw = iwl_alloc_all(cfg, &iwl3945_hw_ops); 3965 hw = iwl_alloc_all(cfg);
3969 if (hw == NULL) { 3966 if (hw == NULL) {
3970 pr_err("Can not allocate network device\n"); 3967 pr_err("Can not allocate network device\n");
3971 err = -ENOMEM; 3968 err = -ENOMEM;
@@ -4117,7 +4114,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4117 4114
4118 pci_enable_msi(priv->pci_dev); 4115 pci_enable_msi(priv->pci_dev);
4119 4116
4120 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr, 4117 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr,
4121 IRQF_SHARED, DRV_NAME, priv); 4118 IRQF_SHARED, DRV_NAME, priv);
4122 if (err) { 4119 if (err) {
4123 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); 4120 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4275,10 +4272,7 @@ static struct pci_driver iwl3945_driver = {
4275 .id_table = iwl3945_hw_card_ids, 4272 .id_table = iwl3945_hw_card_ids,
4276 .probe = iwl3945_pci_probe, 4273 .probe = iwl3945_pci_probe,
4277 .remove = __devexit_p(iwl3945_pci_remove), 4274 .remove = __devexit_p(iwl3945_pci_remove),
4278#ifdef CONFIG_PM 4275 .driver.pm = IWL_PM_OPS,
4279 .suspend = iwl_pci_suspend,
4280 .resume = iwl_pci_resume,
4281#endif
4282}; 4276};
4283 4277
4284static int __init iwl3945_init(void) 4278static int __init iwl3945_init(void)
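The last iwl3945 hunk above drops the legacy, bus-specific .suspend/.resume members of struct pci_driver in favour of the generic dev_pm_ops mechanism (.driver.pm = IWL_PM_OPS). A minimal sketch of that pattern, assuming IWL_PM_OPS expands to a pointer to a struct dev_pm_ops (or NULL when power management is disabled); all names below are placeholders, not the driver's.

#include <linux/pci.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int example_pci_suspend(struct device *dev)
{
	/* to_pci_dev(dev) / dev_get_drvdata(dev) give the driver its state */
	return 0;
}

static int example_pci_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	.suspend = example_pci_suspend,
	.resume  = example_pci_resume,
};
#define EXAMPLE_PM_OPS (&example_pm_ops)
#else
#define EXAMPLE_PM_OPS NULL
#endif

static struct pci_driver example_driver = {
	.name      = "example",
	/* ... .id_table, .probe, .remove ... */
	.driver.pm = EXAMPLE_PM_OPS,
};

The advantage is that the same callbacks serve every PM transition the core supports, and the #ifdef stays confined to one macro instead of wrapping the driver structure.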
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 330c7d9cf10..50dee6a0a5c 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -908,7 +908,7 @@ int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
908 return ret; 908 return ret;
909 } 909 }
910 910
911 iwm->scan_id = iwm->scan_id++ % IWM_SCAN_ID_MAX; 911 iwm->scan_id = (iwm->scan_id + 1) % IWM_SCAN_ID_MAX;
912 912
913 return 0; 913 return 0;
914} 914}
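The one-line iwmc3200wifi change above fixes a classic bug: "iwm->scan_id = iwm->scan_id++ % IWM_SCAN_ID_MAX" both assigns to and post-increments the same variable in a single expression, which is undefined behaviour in C and in practice left the counter stuck. A small standalone illustration of the corrected wrap-around idiom:

#include <stdio.h>

#define SCAN_ID_MAX 3	/* stand-in for IWM_SCAN_ID_MAX */

int main(void)
{
	unsigned scan_id = 0;
	int i;

	for (i = 0; i < 7; i++) {
		/* the fixed form: read, add one, wrap, then store */
		scan_id = (scan_id + 1) % SCAN_ID_MAX;
		printf("%u ", scan_id);	/* prints 1 2 0 1 2 0 1 */
	}
	printf("\n");
	return 0;
}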
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 373930afc26..dee32d3681a 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -9,8 +9,6 @@
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/wait.h> 10#include <linux/wait.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/sched.h>
13#include <linux/wait.h>
14#include <linux/ieee80211.h> 12#include <linux/ieee80211.h>
15#include <net/cfg80211.h> 13#include <net/cfg80211.h>
16#include <asm/unaligned.h> 14#include <asm/unaligned.h>
@@ -2062,7 +2060,7 @@ static void lbs_cfg_set_regulatory_hint(struct lbs_private *priv)
2062 }; 2060 };
2063 2061
2064 /* Section 5.17.2 */ 2062 /* Section 5.17.2 */
2065 static struct region_code_mapping regmap[] = { 2063 static const struct region_code_mapping regmap[] = {
2066 {"US ", 0x10}, /* US FCC */ 2064 {"US ", 0x10}, /* US FCC */
2067 {"CA ", 0x20}, /* Canada */ 2065 {"CA ", 0x20}, /* Canada */
2068 {"EU ", 0x30}, /* ETSI */ 2066 {"EU ", 0x30}, /* ETSI */
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 70745928f3f..78c4da150a7 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -177,6 +177,14 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
177 struct cmd_ds_host_sleep cmd_config; 177 struct cmd_ds_host_sleep cmd_config;
178 int ret; 178 int ret;
179 179
180 /*
181 * Certain firmware versions do not support EHS_REMOVE_WAKEUP command
182 * and the card will return a failure. Since we need to be
183 * able to reset the mask, in those cases we set a 0 mask instead.
184 */
185 if (criteria == EHS_REMOVE_WAKEUP && !priv->ehs_remove_supported)
186 criteria = 0;
187
180 cmd_config.hdr.size = cpu_to_le16(sizeof(cmd_config)); 188 cmd_config.hdr.size = cpu_to_le16(sizeof(cmd_config));
181 cmd_config.criteria = cpu_to_le32(criteria); 189 cmd_config.criteria = cpu_to_le32(criteria);
182 cmd_config.gpio = priv->wol_gpio; 190 cmd_config.gpio = priv->wol_gpio;
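The comment added above documents the libertas host-sleep fallback: firmware that does not understand EHS_REMOVE_WAKEUP fails the command, so when the driver has learned that (ehs_remove_supported == false) it substitutes a zero criteria mask, which clears the wakeup configuration in a way every firmware accepts. A condensed sketch of that decision, with the command transport stubbed out and an illustrative constant value:

#include <stdbool.h>
#include <stdint.h>

#define EHS_REMOVE_WAKEUP 0xFFFFFFFFu	/* illustrative value only */

struct sleep_cfg_ctx {
	bool ehs_remove_supported;	/* learned once at probe time */
};

/* Stub standing in for actually issuing the host-sleep-config command. */
static int send_host_sleep_cfg(uint32_t criteria)
{
	return 0;
}

static int host_sleep_cfg(struct sleep_cfg_ctx *ctx, uint32_t criteria)
{
	/*
	 * Some firmware versions reject EHS_REMOVE_WAKEUP; since the intent
	 * is just "reset the wakeup mask", fall back to a 0 mask there.
	 */
	if (criteria == EHS_REMOVE_WAKEUP && !ctx->ehs_remove_supported)
		criteria = 0;

	return send_host_sleep_cfg(criteria);
}

The if_usb.c and main.c hunks further down supply the other half: wol_criteria now defaults to EHS_REMOVE_WAKEUP, probe tries it once and clears ehs_remove_supported if the firmware refuses, and the old suspend-time "Suspend attempt without configuring wake params" bail-out becomes unnecessary.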
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index cb14c38caf3..18dd9a02c45 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -138,6 +138,7 @@ struct lbs_private {
138 uint32_t wol_criteria; 138 uint32_t wol_criteria;
139 uint8_t wol_gpio; 139 uint8_t wol_gpio;
140 uint8_t wol_gap; 140 uint8_t wol_gap;
141 bool ehs_remove_supported;
141 142
142 /* Transmitting */ 143 /* Transmitting */
143 int tx_pending_len; /* -1 while building packet */ 144 int tx_pending_len; /* -1 while building packet */
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index efaf8503220..6524c70363d 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -345,6 +345,13 @@ static int if_usb_probe(struct usb_interface *intf,
345 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2)) 345 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2))
346 lbs_pr_err("cannot register lbs_flash_boot2 attribute\n"); 346 lbs_pr_err("cannot register lbs_flash_boot2 attribute\n");
347 347
348 /*
349 * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware.
350 */
351 priv->wol_criteria = EHS_REMOVE_WAKEUP;
352 if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
353 priv->ehs_remove_supported = false;
354
348 return 0; 355 return 0;
349 356
350err_start_card: 357err_start_card:
@@ -1090,12 +1097,6 @@ static int if_usb_suspend(struct usb_interface *intf, pm_message_t message)
1090 if (priv->psstate != PS_STATE_FULL_POWER) 1097 if (priv->psstate != PS_STATE_FULL_POWER)
1091 return -1; 1098 return -1;
1092 1099
1093 if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
1094 lbs_pr_info("Suspend attempt without "
1095 "configuring wake params!\n");
1096 return -ENOSYS;
1097 }
1098
1099 ret = lbs_suspend(priv); 1100 ret = lbs_suspend(priv);
1100 if (ret) 1101 if (ret)
1101 goto out; 1102 goto out;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index fcd1bbfc632..6836a6dd985 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -851,9 +851,10 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
851 priv->work_thread = create_singlethread_workqueue("lbs_worker"); 851 priv->work_thread = create_singlethread_workqueue("lbs_worker");
852 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker); 852 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
853 853
854 priv->wol_criteria = 0xffffffff; 854 priv->wol_criteria = EHS_REMOVE_WAKEUP;
855 priv->wol_gpio = 0xff; 855 priv->wol_gpio = 0xff;
856 priv->wol_gap = 20; 856 priv->wol_gap = 20;
857 priv->ehs_remove_supported = true;
857 858
858 goto done; 859 goto done;
859 860
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index a4d0bca9ef2..a2b1df21d28 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -55,7 +55,9 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
55 struct rxpd *p_rx_pd; 55 struct rxpd *p_rx_pd;
56 int hdrchop; 56 int hdrchop;
57 struct ethhdr *p_ethhdr; 57 struct ethhdr *p_ethhdr;
58 const u8 rfc1042_eth_hdr[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; 58 static const u8 rfc1042_eth_hdr[] = {
59 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
60 };
59 61
60 lbs_deb_enter(LBS_DEB_RX); 62 lbs_deb_enter(LBS_DEB_RX);
61 63
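Several hunks in this series make the same micro-change: the rfc1042_eth_hdr array above, the regmap table in cfg.c earlier, and the org_bridge/org_1042 arrays in ray_cs.c further down all become static const. Without static, an initialized local array is typically rebuilt on the stack on every call; with static const the table is emitted once into read-only data and shared. A small standalone illustration with made-up function names:

#include <string.h>

/* Typically re-initialized on the stack each call. */
static int is_rfc1042_hdr_stack(const unsigned char *p)
{
	const unsigned char hdr[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
	return memcmp(p, hdr, sizeof(hdr)) == 0;
}

/* Emitted once into .rodata; no per-call initialization. */
static int is_rfc1042_hdr_rodata(const unsigned char *p)
{
	static const unsigned char hdr[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
	return memcmp(p, hdr, sizeof(hdr)) == 0;
}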
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7eaaa3bab54..454f045ddff 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -309,6 +309,8 @@ struct mac80211_hwsim_data {
309 */ 309 */
310 u64 group; 310 u64 group;
311 struct dentry *debugfs_group; 311 struct dentry *debugfs_group;
312
313 int power_level;
312}; 314};
313 315
314 316
@@ -497,7 +499,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
497 rx_status.band = data->channel->band; 499 rx_status.band = data->channel->band;
498 rx_status.rate_idx = info->control.rates[0].idx; 500 rx_status.rate_idx = info->control.rates[0].idx;
499 /* TODO: simulate real signal strength (and optional packet loss) */ 501 /* TODO: simulate real signal strength (and optional packet loss) */
500 rx_status.signal = -50; 502 rx_status.signal = data->power_level - 50;
501 503
502 if (data->ps != PS_DISABLED) 504 if (data->ps != PS_DISABLED)
503 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 505 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
@@ -698,6 +700,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
698 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); 700 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
699 701
700 data->channel = conf->channel; 702 data->channel = conf->channel;
703 data->power_level = conf->power_level;
701 if (!data->started || !data->beacon_int) 704 if (!data->started || !data->beacon_int)
702 del_timer(&data->beacon_timer); 705 del_timer(&data->beacon_timer);
703 else 706 else
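The mac80211_hwsim hunks above add a per-radio power_level field, capture it in the config callback, and use it when generating received frames, so the simulated RX signal now follows the transmitter's configured power instead of a fixed -50 dBm. A purely illustrative restatement of the mapping:

/* Illustrative only: hwsim's simulated RSSI after the change above. */
static int hwsim_simulated_signal(int tx_power_level)
{
	/* was: return -50;  (constant, ignored the configured power) */
	return tx_power_level - 50;
}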
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index f152a25be59..9ecf8407cb1 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -29,6 +29,12 @@
29#define MWL8K_NAME KBUILD_MODNAME 29#define MWL8K_NAME KBUILD_MODNAME
30#define MWL8K_VERSION "0.12" 30#define MWL8K_VERSION "0.12"
31 31
32/* Module parameters */
33static unsigned ap_mode_default;
34module_param(ap_mode_default, bool, 0);
35MODULE_PARM_DESC(ap_mode_default,
36 "Set to 1 to make ap mode the default instead of sta mode");
37
32/* Register definitions */ 38/* Register definitions */
33#define MWL8K_HIU_GEN_PTR 0x00000c10 39#define MWL8K_HIU_GEN_PTR 0x00000c10
34#define MWL8K_MODE_STA 0x0000005a 40#define MWL8K_MODE_STA 0x0000005a
@@ -92,8 +98,10 @@ struct rxd_ops {
92struct mwl8k_device_info { 98struct mwl8k_device_info {
93 char *part_name; 99 char *part_name;
94 char *helper_image; 100 char *helper_image;
95 char *fw_image; 101 char *fw_image_sta;
102 char *fw_image_ap;
96 struct rxd_ops *ap_rxd_ops; 103 struct rxd_ops *ap_rxd_ops;
104 u32 fw_api_ap;
97}; 105};
98 106
99struct mwl8k_rx_queue { 107struct mwl8k_rx_queue {
@@ -136,8 +144,8 @@ struct mwl8k_priv {
136 void __iomem *regs; 144 void __iomem *regs;
137 145
138 /* firmware */ 146 /* firmware */
139 struct firmware *fw_helper; 147 const struct firmware *fw_helper;
140 struct firmware *fw_ucode; 148 const struct firmware *fw_ucode;
141 149
142 /* hardware/firmware parameters */ 150 /* hardware/firmware parameters */
143 bool ap_fw; 151 bool ap_fw;
@@ -210,6 +218,18 @@ struct mwl8k_priv {
210 218
211 /* Most recently reported noise in dBm */ 219 /* Most recently reported noise in dBm */
212 s8 noise; 220 s8 noise;
221
222 /*
223 * preserve the queue configurations so they can be restored if/when
224 * the firmware image is swapped.
225 */
226 struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_QUEUES];
227
228 /* async firmware loading state */
229 unsigned fw_state;
230 char *fw_pref;
231 char *fw_alt;
232 struct completion firmware_loading_complete;
213}; 233};
214 234
215/* Per interface specific private data */ 235/* Per interface specific private data */
@@ -285,8 +305,9 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
285}; 305};
286 306
287/* Set or get info from Firmware */ 307/* Set or get info from Firmware */
288#define MWL8K_CMD_SET 0x0001
289#define MWL8K_CMD_GET 0x0000 308#define MWL8K_CMD_GET 0x0000
309#define MWL8K_CMD_SET 0x0001
310#define MWL8K_CMD_SET_LIST 0x0002
290 311
291/* Firmware command codes */ 312/* Firmware command codes */
292#define MWL8K_CMD_CODE_DNLD 0x0001 313#define MWL8K_CMD_CODE_DNLD 0x0001
@@ -296,6 +317,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
296#define MWL8K_CMD_GET_STAT 0x0014 317#define MWL8K_CMD_GET_STAT 0x0014
297#define MWL8K_CMD_RADIO_CONTROL 0x001c 318#define MWL8K_CMD_RADIO_CONTROL 0x001c
298#define MWL8K_CMD_RF_TX_POWER 0x001e 319#define MWL8K_CMD_RF_TX_POWER 0x001e
320#define MWL8K_CMD_TX_POWER 0x001f
299#define MWL8K_CMD_RF_ANTENNA 0x0020 321#define MWL8K_CMD_RF_ANTENNA 0x0020
300#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */ 322#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
301#define MWL8K_CMD_SET_PRE_SCAN 0x0107 323#define MWL8K_CMD_SET_PRE_SCAN 0x0107
@@ -333,6 +355,7 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
333 MWL8K_CMDNAME(GET_STAT); 355 MWL8K_CMDNAME(GET_STAT);
334 MWL8K_CMDNAME(RADIO_CONTROL); 356 MWL8K_CMDNAME(RADIO_CONTROL);
335 MWL8K_CMDNAME(RF_TX_POWER); 357 MWL8K_CMDNAME(RF_TX_POWER);
358 MWL8K_CMDNAME(TX_POWER);
336 MWL8K_CMDNAME(RF_ANTENNA); 359 MWL8K_CMDNAME(RF_ANTENNA);
337 MWL8K_CMDNAME(SET_BEACON); 360 MWL8K_CMDNAME(SET_BEACON);
338 MWL8K_CMDNAME(SET_PRE_SCAN); 361 MWL8K_CMDNAME(SET_PRE_SCAN);
@@ -372,7 +395,7 @@ static void mwl8k_hw_reset(struct mwl8k_priv *priv)
372} 395}
373 396
374/* Release fw image */ 397/* Release fw image */
375static void mwl8k_release_fw(struct firmware **fw) 398static void mwl8k_release_fw(const struct firmware **fw)
376{ 399{
377 if (*fw == NULL) 400 if (*fw == NULL)
378 return; 401 return;
@@ -386,37 +409,68 @@ static void mwl8k_release_firmware(struct mwl8k_priv *priv)
386 mwl8k_release_fw(&priv->fw_helper); 409 mwl8k_release_fw(&priv->fw_helper);
387} 410}
388 411
412/* states for asynchronous f/w loading */
413static void mwl8k_fw_state_machine(const struct firmware *fw, void *context);
414enum {
415 FW_STATE_INIT = 0,
416 FW_STATE_LOADING_PREF,
417 FW_STATE_LOADING_ALT,
418 FW_STATE_ERROR,
419};
420
389/* Request fw image */ 421/* Request fw image */
390static int mwl8k_request_fw(struct mwl8k_priv *priv, 422static int mwl8k_request_fw(struct mwl8k_priv *priv,
391 const char *fname, struct firmware **fw) 423 const char *fname, const struct firmware **fw,
424 bool nowait)
392{ 425{
393 /* release current image */ 426 /* release current image */
394 if (*fw != NULL) 427 if (*fw != NULL)
395 mwl8k_release_fw(fw); 428 mwl8k_release_fw(fw);
396 429
397 return request_firmware((const struct firmware **)fw, 430 if (nowait)
398 fname, &priv->pdev->dev); 431 return request_firmware_nowait(THIS_MODULE, 1, fname,
432 &priv->pdev->dev, GFP_KERNEL,
433 priv, mwl8k_fw_state_machine);
434 else
435 return request_firmware(fw, fname, &priv->pdev->dev);
399} 436}
400 437
401static int mwl8k_request_firmware(struct mwl8k_priv *priv) 438static int mwl8k_request_firmware(struct mwl8k_priv *priv, char *fw_image,
439 bool nowait)
402{ 440{
403 struct mwl8k_device_info *di = priv->device_info; 441 struct mwl8k_device_info *di = priv->device_info;
404 int rc; 442 int rc;
405 443
406 if (di->helper_image != NULL) { 444 if (di->helper_image != NULL) {
407 rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper); 445 if (nowait)
408 if (rc) { 446 rc = mwl8k_request_fw(priv, di->helper_image,
409 printk(KERN_ERR "%s: Error requesting helper " 447 &priv->fw_helper, true);
410 "firmware file %s\n", pci_name(priv->pdev), 448 else
411 di->helper_image); 449 rc = mwl8k_request_fw(priv, di->helper_image,
450 &priv->fw_helper, false);
451 if (rc)
452 printk(KERN_ERR "%s: Error requesting helper fw %s\n",
453 pci_name(priv->pdev), di->helper_image);
454
455 if (rc || nowait)
412 return rc; 456 return rc;
413 }
414 } 457 }
415 458
416 rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw_ucode); 459 if (nowait) {
460 /*
461 * if we get here, no helper image is needed. Skip the
462 * FW_STATE_INIT state.
463 */
464 priv->fw_state = FW_STATE_LOADING_PREF;
465 rc = mwl8k_request_fw(priv, fw_image,
466 &priv->fw_ucode,
467 true);
468 } else
469 rc = mwl8k_request_fw(priv, fw_image,
470 &priv->fw_ucode, false);
417 if (rc) { 471 if (rc) {
418 printk(KERN_ERR "%s: Error requesting firmware file %s\n", 472 printk(KERN_ERR "%s: Error requesting firmware file %s\n",
419 pci_name(priv->pdev), di->fw_image); 473 pci_name(priv->pdev), fw_image);
420 mwl8k_release_fw(&priv->fw_helper); 474 mwl8k_release_fw(&priv->fw_helper);
421 return rc; 475 return rc;
422 } 476 }
@@ -577,12 +631,12 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
577static int mwl8k_load_firmware(struct ieee80211_hw *hw) 631static int mwl8k_load_firmware(struct ieee80211_hw *hw)
578{ 632{
579 struct mwl8k_priv *priv = hw->priv; 633 struct mwl8k_priv *priv = hw->priv;
580 struct firmware *fw = priv->fw_ucode; 634 const struct firmware *fw = priv->fw_ucode;
581 int rc; 635 int rc;
582 int loops; 636 int loops;
583 637
584 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) { 638 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
585 struct firmware *helper = priv->fw_helper; 639 const struct firmware *helper = priv->fw_helper;
586 640
587 if (helper == NULL) { 641 if (helper == NULL) {
588 printk(KERN_ERR "%s: helper image needed but none " 642 printk(KERN_ERR "%s: helper image needed but none "
@@ -1811,6 +1865,7 @@ struct mwl8k_cmd_get_hw_spec_ap {
1811 __le32 wcbbase1; 1865 __le32 wcbbase1;
1812 __le32 wcbbase2; 1866 __le32 wcbbase2;
1813 __le32 wcbbase3; 1867 __le32 wcbbase3;
1868 __le32 fw_api_version;
1814} __packed; 1869} __packed;
1815 1870
1816static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw) 1871static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
@@ -1818,6 +1873,7 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1818 struct mwl8k_priv *priv = hw->priv; 1873 struct mwl8k_priv *priv = hw->priv;
1819 struct mwl8k_cmd_get_hw_spec_ap *cmd; 1874 struct mwl8k_cmd_get_hw_spec_ap *cmd;
1820 int rc; 1875 int rc;
1876 u32 api_version;
1821 1877
1822 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1878 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1823 if (cmd == NULL) 1879 if (cmd == NULL)
@@ -1834,6 +1890,16 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1834 if (!rc) { 1890 if (!rc) {
1835 int off; 1891 int off;
1836 1892
1893 api_version = le32_to_cpu(cmd->fw_api_version);
1894 if (priv->device_info->fw_api_ap != api_version) {
1895 printk(KERN_ERR "%s: Unsupported fw API version for %s."
1896 " Expected %d got %d.\n", MWL8K_NAME,
1897 priv->device_info->part_name,
1898 priv->device_info->fw_api_ap,
1899 api_version);
1900 rc = -EINVAL;
1901 goto done;
1902 }
1837 SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr); 1903 SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
1838 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); 1904 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
1839 priv->fw_rev = le32_to_cpu(cmd->fw_rev); 1905 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
@@ -1861,6 +1927,7 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1861 iowrite32(priv->txq[3].txd_dma, priv->sram + off); 1927 iowrite32(priv->txq[3].txd_dma, priv->sram + off);
1862 } 1928 }
1863 1929
1930done:
1864 kfree(cmd); 1931 kfree(cmd);
1865 return rc; 1932 return rc;
1866} 1933}
@@ -2084,7 +2151,7 @@ mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
2084/* 2151/*
2085 * CMD_RF_TX_POWER. 2152 * CMD_RF_TX_POWER.
2086 */ 2153 */
2087#define MWL8K_TX_POWER_LEVEL_TOTAL 8 2154#define MWL8K_RF_TX_POWER_LEVEL_TOTAL 8
2088 2155
2089struct mwl8k_cmd_rf_tx_power { 2156struct mwl8k_cmd_rf_tx_power {
2090 struct mwl8k_cmd_pkt header; 2157 struct mwl8k_cmd_pkt header;
@@ -2092,7 +2159,7 @@ struct mwl8k_cmd_rf_tx_power {
2092 __le16 support_level; 2159 __le16 support_level;
2093 __le16 current_level; 2160 __le16 current_level;
2094 __le16 reserved; 2161 __le16 reserved;
2095 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; 2162 __le16 power_level_list[MWL8K_RF_TX_POWER_LEVEL_TOTAL];
2096} __packed; 2163} __packed;
2097 2164
2098static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm) 2165static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
@@ -2116,6 +2183,65 @@ static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
2116} 2183}
2117 2184
2118/* 2185/*
2186 * CMD_TX_POWER.
2187 */
2188#define MWL8K_TX_POWER_LEVEL_TOTAL 12
2189
2190struct mwl8k_cmd_tx_power {
2191 struct mwl8k_cmd_pkt header;
2192 __le16 action;
2193 __le16 band;
2194 __le16 channel;
2195 __le16 bw;
2196 __le16 sub_ch;
2197 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
2198} __attribute__((packed));
2199
2200static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
2201 struct ieee80211_conf *conf,
2202 unsigned short pwr)
2203{
2204 struct ieee80211_channel *channel = conf->channel;
2205 struct mwl8k_cmd_tx_power *cmd;
2206 int rc;
2207 int i;
2208
2209 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2210 if (cmd == NULL)
2211 return -ENOMEM;
2212
2213 cmd->header.code = cpu_to_le16(MWL8K_CMD_TX_POWER);
2214 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2215 cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST);
2216
2217 if (channel->band == IEEE80211_BAND_2GHZ)
2218 cmd->band = cpu_to_le16(0x1);
2219 else if (channel->band == IEEE80211_BAND_5GHZ)
2220 cmd->band = cpu_to_le16(0x4);
2221
2222 cmd->channel = channel->hw_value;
2223
2224 if (conf->channel_type == NL80211_CHAN_NO_HT ||
2225 conf->channel_type == NL80211_CHAN_HT20) {
2226 cmd->bw = cpu_to_le16(0x2);
2227 } else {
2228 cmd->bw = cpu_to_le16(0x4);
2229 if (conf->channel_type == NL80211_CHAN_HT40MINUS)
2230 cmd->sub_ch = cpu_to_le16(0x3);
2231 else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
2232 cmd->sub_ch = cpu_to_le16(0x1);
2233 }
2234
2235 for (i = 0; i < MWL8K_TX_POWER_LEVEL_TOTAL; i++)
2236 cmd->power_level_list[i] = cpu_to_le16(pwr);
2237
2238 rc = mwl8k_post_cmd(hw, &cmd->header);
2239 kfree(cmd);
2240
2241 return rc;
2242}
2243
2244/*
2119 * CMD_RF_ANTENNA. 2245 * CMD_RF_ANTENNA.
2120 */ 2246 */
2121struct mwl8k_cmd_rf_antenna { 2247struct mwl8k_cmd_rf_antenna {
@@ -3283,13 +3409,16 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
3283 mwl8k_txq_reclaim(hw, i, INT_MAX, 1); 3409 mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
3284} 3410}
3285 3411
3412static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image);
3413
3286static int mwl8k_add_interface(struct ieee80211_hw *hw, 3414static int mwl8k_add_interface(struct ieee80211_hw *hw,
3287 struct ieee80211_vif *vif) 3415 struct ieee80211_vif *vif)
3288{ 3416{
3289 struct mwl8k_priv *priv = hw->priv; 3417 struct mwl8k_priv *priv = hw->priv;
3290 struct mwl8k_vif *mwl8k_vif; 3418 struct mwl8k_vif *mwl8k_vif;
3291 u32 macids_supported; 3419 u32 macids_supported;
3292 int macid; 3420 int macid, rc;
3421 struct mwl8k_device_info *di;
3293 3422
3294 /* 3423 /*
3295 * Reject interface creation if sniffer mode is active, as 3424 * Reject interface creation if sniffer mode is active, as
@@ -3302,12 +3431,28 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
3302 return -EINVAL; 3431 return -EINVAL;
3303 } 3432 }
3304 3433
3305 3434 di = priv->device_info;
3306 switch (vif->type) { 3435 switch (vif->type) {
3307 case NL80211_IFTYPE_AP: 3436 case NL80211_IFTYPE_AP:
3437 if (!priv->ap_fw && di->fw_image_ap) {
3438 /* we must load the ap fw to meet this request */
3439 if (!list_empty(&priv->vif_list))
3440 return -EBUSY;
3441 rc = mwl8k_reload_firmware(hw, di->fw_image_ap);
3442 if (rc)
3443 return rc;
3444 }
3308 macids_supported = priv->ap_macids_supported; 3445 macids_supported = priv->ap_macids_supported;
3309 break; 3446 break;
3310 case NL80211_IFTYPE_STATION: 3447 case NL80211_IFTYPE_STATION:
3448 if (priv->ap_fw && di->fw_image_sta) {
3449 /* we must load the sta fw to meet this request */
3450 if (!list_empty(&priv->vif_list))
3451 return -EBUSY;
3452 rc = mwl8k_reload_firmware(hw, di->fw_image_sta);
3453 if (rc)
3454 return rc;
3455 }
3311 macids_supported = priv->sta_macids_supported; 3456 macids_supported = priv->sta_macids_supported;
3312 break; 3457 break;
3313 default: 3458 default:
@@ -3377,15 +3522,19 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
3377 3522
3378 if (conf->power_level > 18) 3523 if (conf->power_level > 18)
3379 conf->power_level = 18; 3524 conf->power_level = 18;
3380 rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
3381 if (rc)
3382 goto out;
3383 3525
3384 if (priv->ap_fw) { 3526 if (priv->ap_fw) {
3527 rc = mwl8k_cmd_tx_power(hw, conf, conf->power_level);
3528 if (rc)
3529 goto out;
3530
3385 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7); 3531 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7);
3386 if (!rc) 3532 if (!rc)
3387 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7); 3533 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
3388 } else { 3534 } else {
3535 rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
3536 if (rc)
3537 goto out;
3389 rc = mwl8k_cmd_mimo_config(hw, 0x7, 0x7); 3538 rc = mwl8k_cmd_mimo_config(hw, 0x7, 0x7);
3390 } 3539 }
3391 3540
@@ -3739,6 +3888,9 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
3739 3888
3740 rc = mwl8k_fw_lock(hw); 3889 rc = mwl8k_fw_lock(hw);
3741 if (!rc) { 3890 if (!rc) {
3891 BUG_ON(queue > MWL8K_TX_QUEUES - 1);
3892 memcpy(&priv->wmm_params[queue], params, sizeof(*params));
3893
3742 if (!priv->wmm_enabled) 3894 if (!priv->wmm_enabled)
3743 rc = mwl8k_cmd_set_wmm_mode(hw, 1); 3895 rc = mwl8k_cmd_set_wmm_mode(hw, 1);
3744 3896
@@ -3838,21 +3990,27 @@ enum {
3838 MWL8366, 3990 MWL8366,
3839}; 3991};
3840 3992
3993#define MWL8K_8366_AP_FW_API 1
3994#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw"
3995#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api)
3996
3841static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = { 3997static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
3842 [MWL8363] = { 3998 [MWL8363] = {
3843 .part_name = "88w8363", 3999 .part_name = "88w8363",
3844 .helper_image = "mwl8k/helper_8363.fw", 4000 .helper_image = "mwl8k/helper_8363.fw",
3845 .fw_image = "mwl8k/fmimage_8363.fw", 4001 .fw_image_sta = "mwl8k/fmimage_8363.fw",
3846 }, 4002 },
3847 [MWL8687] = { 4003 [MWL8687] = {
3848 .part_name = "88w8687", 4004 .part_name = "88w8687",
3849 .helper_image = "mwl8k/helper_8687.fw", 4005 .helper_image = "mwl8k/helper_8687.fw",
3850 .fw_image = "mwl8k/fmimage_8687.fw", 4006 .fw_image_sta = "mwl8k/fmimage_8687.fw",
3851 }, 4007 },
3852 [MWL8366] = { 4008 [MWL8366] = {
3853 .part_name = "88w8366", 4009 .part_name = "88w8366",
3854 .helper_image = "mwl8k/helper_8366.fw", 4010 .helper_image = "mwl8k/helper_8366.fw",
3855 .fw_image = "mwl8k/fmimage_8366.fw", 4011 .fw_image_sta = "mwl8k/fmimage_8366.fw",
4012 .fw_image_ap = MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API),
4013 .fw_api_ap = MWL8K_8366_AP_FW_API,
3856 .ap_rxd_ops = &rxd_8366_ap_ops, 4014 .ap_rxd_ops = &rxd_8366_ap_ops,
3857 }, 4015 },
3858}; 4016};
@@ -3863,6 +4021,7 @@ MODULE_FIRMWARE("mwl8k/helper_8687.fw");
3863MODULE_FIRMWARE("mwl8k/fmimage_8687.fw"); 4021MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
3864MODULE_FIRMWARE("mwl8k/helper_8366.fw"); 4022MODULE_FIRMWARE("mwl8k/helper_8366.fw");
3865MODULE_FIRMWARE("mwl8k/fmimage_8366.fw"); 4023MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
4024MODULE_FIRMWARE(MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API));
3866 4025
3867static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { 4026static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
3868 { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, }, 4027 { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, },
@@ -3876,94 +4035,133 @@ static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
3876}; 4035};
3877MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table); 4036MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
3878 4037
3879static int __devinit mwl8k_probe(struct pci_dev *pdev, 4038static int mwl8k_request_alt_fw(struct mwl8k_priv *priv)
3880 const struct pci_device_id *id)
3881{ 4039{
3882 static int printed_version = 0;
3883 struct ieee80211_hw *hw;
3884 struct mwl8k_priv *priv;
3885 int rc; 4040 int rc;
3886 int i; 4041 printk(KERN_ERR "%s: Error requesting preferred fw %s.\n"
3887 4042 "Trying alternative firmware %s\n", pci_name(priv->pdev),
3888 if (!printed_version) { 4043 priv->fw_pref, priv->fw_alt);
3889 printk(KERN_INFO "%s version %s\n", MWL8K_DESC, MWL8K_VERSION); 4044 rc = mwl8k_request_fw(priv, priv->fw_alt, &priv->fw_ucode, true);
3890 printed_version = 1;
3891 }
3892
3893
3894 rc = pci_enable_device(pdev);
3895 if (rc) { 4045 if (rc) {
3896 printk(KERN_ERR "%s: Cannot enable new PCI device\n", 4046 printk(KERN_ERR "%s: Error requesting alt fw %s\n",
3897 MWL8K_NAME); 4047 pci_name(priv->pdev), priv->fw_alt);
3898 return rc; 4048 return rc;
3899 } 4049 }
4050 return 0;
4051}
3900 4052
3901 rc = pci_request_regions(pdev, MWL8K_NAME); 4053static int mwl8k_firmware_load_success(struct mwl8k_priv *priv);
3902 if (rc) { 4054static void mwl8k_fw_state_machine(const struct firmware *fw, void *context)
3903 printk(KERN_ERR "%s: Cannot obtain PCI resources\n", 4055{
3904 MWL8K_NAME); 4056 struct mwl8k_priv *priv = context;
3905 goto err_disable_device; 4057 struct mwl8k_device_info *di = priv->device_info;
3906 } 4058 int rc;
3907
3908 pci_set_master(pdev);
3909
3910
3911 hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
3912 if (hw == NULL) {
3913 printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
3914 rc = -ENOMEM;
3915 goto err_free_reg;
3916 }
3917 4059
3918 SET_IEEE80211_DEV(hw, &pdev->dev); 4060 switch (priv->fw_state) {
3919 pci_set_drvdata(pdev, hw); 4061 case FW_STATE_INIT:
4062 if (!fw) {
4063 printk(KERN_ERR "%s: Error requesting helper fw %s\n",
4064 pci_name(priv->pdev), di->helper_image);
4065 goto fail;
4066 }
4067 priv->fw_helper = fw;
4068 rc = mwl8k_request_fw(priv, priv->fw_pref, &priv->fw_ucode,
4069 true);
4070 if (rc && priv->fw_alt) {
4071 rc = mwl8k_request_alt_fw(priv);
4072 if (rc)
4073 goto fail;
4074 priv->fw_state = FW_STATE_LOADING_ALT;
4075 } else if (rc)
4076 goto fail;
4077 else
4078 priv->fw_state = FW_STATE_LOADING_PREF;
4079 break;
3920 4080
3921 priv = hw->priv; 4081 case FW_STATE_LOADING_PREF:
3922 priv->hw = hw; 4082 if (!fw) {
3923 priv->pdev = pdev; 4083 if (priv->fw_alt) {
3924 priv->device_info = &mwl8k_info_tbl[id->driver_data]; 4084 rc = mwl8k_request_alt_fw(priv);
4085 if (rc)
4086 goto fail;
4087 priv->fw_state = FW_STATE_LOADING_ALT;
4088 } else
4089 goto fail;
4090 } else {
4091 priv->fw_ucode = fw;
4092 rc = mwl8k_firmware_load_success(priv);
4093 if (rc)
4094 goto fail;
4095 else
4096 complete(&priv->firmware_loading_complete);
4097 }
4098 break;
3925 4099
4100 case FW_STATE_LOADING_ALT:
4101 if (!fw) {
4102 printk(KERN_ERR "%s: Error requesting alt fw %s\n",
4103 pci_name(priv->pdev), di->helper_image);
4104 goto fail;
4105 }
4106 priv->fw_ucode = fw;
4107 rc = mwl8k_firmware_load_success(priv);
4108 if (rc)
4109 goto fail;
4110 else
4111 complete(&priv->firmware_loading_complete);
4112 break;
3926 4113
3927 priv->sram = pci_iomap(pdev, 0, 0x10000); 4114 default:
3928 if (priv->sram == NULL) { 4115 printk(KERN_ERR "%s: Unexpected firmware loading state: %d\n",
3929 wiphy_err(hw->wiphy, "Cannot map device SRAM\n"); 4116 MWL8K_NAME, priv->fw_state);
3930 goto err_iounmap; 4117 BUG_ON(1);
3931 } 4118 }
3932 4119
3933 /* 4120 return;
3934 * If BAR0 is a 32 bit BAR, the register BAR will be BAR1.
3935 * If BAR0 is a 64 bit BAR, the register BAR will be BAR2.
3936 */
3937 priv->regs = pci_iomap(pdev, 1, 0x10000);
3938 if (priv->regs == NULL) {
3939 priv->regs = pci_iomap(pdev, 2, 0x10000);
3940 if (priv->regs == NULL) {
3941 wiphy_err(hw->wiphy, "Cannot map device registers\n");
3942 goto err_iounmap;
3943 }
3944 }
3945 4121
4122fail:
4123 priv->fw_state = FW_STATE_ERROR;
4124 complete(&priv->firmware_loading_complete);
4125 device_release_driver(&priv->pdev->dev);
4126 mwl8k_release_firmware(priv);
4127}
4128
4129static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
4130 bool nowait)
4131{
4132 struct mwl8k_priv *priv = hw->priv;
4133 int rc;
3946 4134
3947 /* Reset firmware and hardware */ 4135 /* Reset firmware and hardware */
3948 mwl8k_hw_reset(priv); 4136 mwl8k_hw_reset(priv);
3949 4137
3950 /* Ask userland hotplug daemon for the device firmware */ 4138 /* Ask userland hotplug daemon for the device firmware */
3951 rc = mwl8k_request_firmware(priv); 4139 rc = mwl8k_request_firmware(priv, fw_image, nowait);
3952 if (rc) { 4140 if (rc) {
3953 wiphy_err(hw->wiphy, "Firmware files not found\n"); 4141 wiphy_err(hw->wiphy, "Firmware files not found\n");
3954 goto err_stop_firmware; 4142 return rc;
3955 } 4143 }
3956 4144
4145 if (nowait)
4146 return rc;
4147
3957 /* Load firmware into hardware */ 4148 /* Load firmware into hardware */
3958 rc = mwl8k_load_firmware(hw); 4149 rc = mwl8k_load_firmware(hw);
3959 if (rc) { 4150 if (rc)
3960 wiphy_err(hw->wiphy, "Cannot start firmware\n"); 4151 wiphy_err(hw->wiphy, "Cannot start firmware\n");
3961 goto err_stop_firmware;
3962 }
3963 4152
3964 /* Reclaim memory once firmware is successfully loaded */ 4153 /* Reclaim memory once firmware is successfully loaded */
3965 mwl8k_release_firmware(priv); 4154 mwl8k_release_firmware(priv);
3966 4155
4156 return rc;
4157}
4158
4159/* initialize hw after successfully loading a firmware image */
4160static int mwl8k_probe_hw(struct ieee80211_hw *hw)
4161{
4162 struct mwl8k_priv *priv = hw->priv;
4163 int rc = 0;
4164 int i;
3967 4165
3968 if (priv->ap_fw) { 4166 if (priv->ap_fw) {
3969 priv->rxd_ops = priv->device_info->ap_rxd_ops; 4167 priv->rxd_ops = priv->device_info->ap_rxd_ops;
@@ -3980,58 +4178,11 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3980 priv->wmm_enabled = false; 4178 priv->wmm_enabled = false;
3981 priv->pending_tx_pkts = 0; 4179 priv->pending_tx_pkts = 0;
3982 4180
3983
3984 /*
3985 * Extra headroom is the size of the required DMA header
3986 * minus the size of the smallest 802.11 frame (CTS frame).
3987 */
3988 hw->extra_tx_headroom =
3989 sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts);
3990
3991 hw->channel_change_time = 10;
3992
3993 hw->queues = MWL8K_TX_QUEUES;
3994
3995 /* Set rssi values to dBm */
3996 hw->flags |= IEEE80211_HW_SIGNAL_DBM;
3997 hw->vif_data_size = sizeof(struct mwl8k_vif);
3998 hw->sta_data_size = sizeof(struct mwl8k_sta);
3999
4000 priv->macids_used = 0;
4001 INIT_LIST_HEAD(&priv->vif_list);
4002
4003 /* Set default radio state and preamble */
4004 priv->radio_on = 0;
4005 priv->radio_short_preamble = 0;
4006
4007 /* Finalize join worker */
4008 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
4009
4010 /* TX reclaim and RX tasklets. */
4011 tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
4012 tasklet_disable(&priv->poll_tx_task);
4013 tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
4014 tasklet_disable(&priv->poll_rx_task);
4015
4016 /* Power management cookie */
4017 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
4018 if (priv->cookie == NULL)
4019 goto err_stop_firmware;
4020
4021 rc = mwl8k_rxq_init(hw, 0); 4181 rc = mwl8k_rxq_init(hw, 0);
4022 if (rc) 4182 if (rc)
4023 goto err_free_cookie; 4183 goto err_stop_firmware;
4024 rxq_refill(hw, 0, INT_MAX); 4184 rxq_refill(hw, 0, INT_MAX);
4025 4185
4026 mutex_init(&priv->fw_mutex);
4027 priv->fw_mutex_owner = NULL;
4028 priv->fw_mutex_depth = 0;
4029 priv->hostcmd_wait = NULL;
4030
4031 spin_lock_init(&priv->tx_lock);
4032
4033 priv->tx_wait = NULL;
4034
4035 for (i = 0; i < MWL8K_TX_QUEUES; i++) { 4186 for (i = 0; i < MWL8K_TX_QUEUES; i++) {
4036 rc = mwl8k_txq_init(hw, i); 4187 rc = mwl8k_txq_init(hw, i);
4037 if (rc) 4188 if (rc)
@@ -4071,13 +4222,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4071 goto err_free_irq; 4222 goto err_free_irq;
4072 } 4223 }
4073 4224
4074 hw->wiphy->interface_modes = 0;
4075 if (priv->ap_macids_supported)
4076 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
4077 if (priv->sta_macids_supported)
4078 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
4079
4080
4081 /* Turn radio off */ 4225 /* Turn radio off */
4082 rc = mwl8k_cmd_radio_disable(hw); 4226 rc = mwl8k_cmd_radio_disable(hw);
4083 if (rc) { 4227 if (rc) {
@@ -4096,12 +4240,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4096 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4240 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
4097 free_irq(priv->pdev->irq, hw); 4241 free_irq(priv->pdev->irq, hw);
4098 4242
4099 rc = ieee80211_register_hw(hw);
4100 if (rc) {
4101 wiphy_err(hw->wiphy, "Cannot register device\n");
4102 goto err_free_queues;
4103 }
4104
4105 wiphy_info(hw->wiphy, "%s v%d, %pm, %s firmware %u.%u.%u.%u\n", 4243 wiphy_info(hw->wiphy, "%s v%d, %pm, %s firmware %u.%u.%u.%u\n",
4106 priv->device_info->part_name, 4244 priv->device_info->part_name,
4107 priv->hw_rev, hw->wiphy->perm_addr, 4245 priv->hw_rev, hw->wiphy->perm_addr,
@@ -4120,14 +4258,238 @@ err_free_queues:
4120 mwl8k_txq_deinit(hw, i); 4258 mwl8k_txq_deinit(hw, i);
4121 mwl8k_rxq_deinit(hw, 0); 4259 mwl8k_rxq_deinit(hw, 0);
4122 4260
4261err_stop_firmware:
4262 mwl8k_hw_reset(priv);
4263
4264 return rc;
4265}
4266
4267/*
4268 * invoke mwl8k_reload_firmware to change the firmware image after the device
4269 * has already been registered
4270 */
4271static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
4272{
4273 int i, rc = 0;
4274 struct mwl8k_priv *priv = hw->priv;
4275
4276 mwl8k_stop(hw);
4277 mwl8k_rxq_deinit(hw, 0);
4278
4279 for (i = 0; i < MWL8K_TX_QUEUES; i++)
4280 mwl8k_txq_deinit(hw, i);
4281
4282 rc = mwl8k_init_firmware(hw, fw_image, false);
4283 if (rc)
4284 goto fail;
4285
4286 rc = mwl8k_probe_hw(hw);
4287 if (rc)
4288 goto fail;
4289
4290 rc = mwl8k_start(hw);
4291 if (rc)
4292 goto fail;
4293
4294 rc = mwl8k_config(hw, ~0);
4295 if (rc)
4296 goto fail;
4297
4298 for (i = 0; i < MWL8K_TX_QUEUES; i++) {
4299 rc = mwl8k_conf_tx(hw, i, &priv->wmm_params[i]);
4300 if (rc)
4301 goto fail;
4302 }
4303
4304 return rc;
4305
4306fail:
4307 printk(KERN_WARNING "mwl8k: Failed to reload firmware image.\n");
4308 return rc;
4309}
4310
4311static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
4312{
4313 struct ieee80211_hw *hw = priv->hw;
4314 int i, rc;
4315
4316 rc = mwl8k_load_firmware(hw);
4317 mwl8k_release_firmware(priv);
4318 if (rc) {
4319 wiphy_err(hw->wiphy, "Cannot start firmware\n");
4320 return rc;
4321 }
4322
4323 /*
4324 * Extra headroom is the size of the required DMA header
4325 * minus the size of the smallest 802.11 frame (CTS frame).
4326 */
4327 hw->extra_tx_headroom =
4328 sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts);
4329
4330 hw->channel_change_time = 10;
4331
4332 hw->queues = MWL8K_TX_QUEUES;
4333
4334 /* Set rssi values to dBm */
4335 hw->flags |= IEEE80211_HW_SIGNAL_DBM;
4336 hw->vif_data_size = sizeof(struct mwl8k_vif);
4337 hw->sta_data_size = sizeof(struct mwl8k_sta);
4338
4339 priv->macids_used = 0;
4340 INIT_LIST_HEAD(&priv->vif_list);
4341
4342 /* Set default radio state and preamble */
4343 priv->radio_on = 0;
4344 priv->radio_short_preamble = 0;
4345
4346 /* Finalize join worker */
4347 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
4348
4349 /* TX reclaim and RX tasklets. */
4350 tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
4351 tasklet_disable(&priv->poll_tx_task);
4352 tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
4353 tasklet_disable(&priv->poll_rx_task);
4354
4355 /* Power management cookie */
4356 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
4357 if (priv->cookie == NULL)
4358 return -ENOMEM;
4359
4360 mutex_init(&priv->fw_mutex);
4361 priv->fw_mutex_owner = NULL;
4362 priv->fw_mutex_depth = 0;
4363 priv->hostcmd_wait = NULL;
4364
4365 spin_lock_init(&priv->tx_lock);
4366
4367 priv->tx_wait = NULL;
4368
4369 rc = mwl8k_probe_hw(hw);
4370 if (rc)
4371 goto err_free_cookie;
4372
4373 hw->wiphy->interface_modes = 0;
4374 if (priv->ap_macids_supported || priv->device_info->fw_image_ap)
4375 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
4376 if (priv->sta_macids_supported || priv->device_info->fw_image_sta)
4377 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
4378
4379 rc = ieee80211_register_hw(hw);
4380 if (rc) {
4381 wiphy_err(hw->wiphy, "Cannot register device\n");
4382 goto err_unprobe_hw;
4383 }
4384
4385 return 0;
4386
4387err_unprobe_hw:
4388 for (i = 0; i < MWL8K_TX_QUEUES; i++)
4389 mwl8k_txq_deinit(hw, i);
4390 mwl8k_rxq_deinit(hw, 0);
4391
4123err_free_cookie: 4392err_free_cookie:
4124 if (priv->cookie != NULL) 4393 if (priv->cookie != NULL)
4125 pci_free_consistent(priv->pdev, 4, 4394 pci_free_consistent(priv->pdev, 4,
4126 priv->cookie, priv->cookie_dma); 4395 priv->cookie, priv->cookie_dma);
4127 4396
4397 return rc;
4398}
4399static int __devinit mwl8k_probe(struct pci_dev *pdev,
4400 const struct pci_device_id *id)
4401{
4402 static int printed_version;
4403 struct ieee80211_hw *hw;
4404 struct mwl8k_priv *priv;
4405 struct mwl8k_device_info *di;
4406 int rc;
4407
4408 if (!printed_version) {
4409 printk(KERN_INFO "%s version %s\n", MWL8K_DESC, MWL8K_VERSION);
4410 printed_version = 1;
4411 }
4412
4413
4414 rc = pci_enable_device(pdev);
4415 if (rc) {
4416 printk(KERN_ERR "%s: Cannot enable new PCI device\n",
4417 MWL8K_NAME);
4418 return rc;
4419 }
4420
4421 rc = pci_request_regions(pdev, MWL8K_NAME);
4422 if (rc) {
4423 printk(KERN_ERR "%s: Cannot obtain PCI resources\n",
4424 MWL8K_NAME);
4425 goto err_disable_device;
4426 }
4427
4428 pci_set_master(pdev);
4429
4430
4431 hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
4432 if (hw == NULL) {
4433 printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
4434 rc = -ENOMEM;
4435 goto err_free_reg;
4436 }
4437
4438 SET_IEEE80211_DEV(hw, &pdev->dev);
4439 pci_set_drvdata(pdev, hw);
4440
4441 priv = hw->priv;
4442 priv->hw = hw;
4443 priv->pdev = pdev;
4444 priv->device_info = &mwl8k_info_tbl[id->driver_data];
4445
4446
4447 priv->sram = pci_iomap(pdev, 0, 0x10000);
4448 if (priv->sram == NULL) {
4449 wiphy_err(hw->wiphy, "Cannot map device SRAM\n");
4450 goto err_iounmap;
4451 }
4452
4453 /*
4454 * If BAR0 is a 32 bit BAR, the register BAR will be BAR1.
4455 * If BAR0 is a 64 bit BAR, the register BAR will be BAR2.
4456 */
4457 priv->regs = pci_iomap(pdev, 1, 0x10000);
4458 if (priv->regs == NULL) {
4459 priv->regs = pci_iomap(pdev, 2, 0x10000);
4460 if (priv->regs == NULL) {
4461 wiphy_err(hw->wiphy, "Cannot map device registers\n");
4462 goto err_iounmap;
4463 }
4464 }
4465
4466 /*
4467 * Choose the initial fw image depending on user input. If a second
4468 * image is available, make it the alternative image that will be
4469 * loaded if the first one fails.
4470 */
4471 init_completion(&priv->firmware_loading_complete);
4472 di = priv->device_info;
4473 if (ap_mode_default && di->fw_image_ap) {
4474 priv->fw_pref = di->fw_image_ap;
4475 priv->fw_alt = di->fw_image_sta;
4476 } else if (!ap_mode_default && di->fw_image_sta) {
4477 priv->fw_pref = di->fw_image_sta;
4478 priv->fw_alt = di->fw_image_ap;
4479 } else if (ap_mode_default && !di->fw_image_ap && di->fw_image_sta) {
4480 printk(KERN_WARNING "AP fw is unavailable. Using STA fw.");
4481 priv->fw_pref = di->fw_image_sta;
4482 } else if (!ap_mode_default && !di->fw_image_sta && di->fw_image_ap) {
4483 printk(KERN_WARNING "STA fw is unavailable. Using AP fw.");
4484 priv->fw_pref = di->fw_image_ap;
4485 }
4486 rc = mwl8k_init_firmware(hw, priv->fw_pref, true);
4487 if (rc)
4488 goto err_stop_firmware;
4489 return rc;
4490
4128err_stop_firmware: 4491err_stop_firmware:
4129 mwl8k_hw_reset(priv); 4492 mwl8k_hw_reset(priv);
4130 mwl8k_release_firmware(priv);
4131 4493
4132err_iounmap: 4494err_iounmap:
4133 if (priv->regs != NULL) 4495 if (priv->regs != NULL)
@@ -4163,6 +4525,13 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
4163 return; 4525 return;
4164 priv = hw->priv; 4526 priv = hw->priv;
4165 4527
4528 wait_for_completion(&priv->firmware_loading_complete);
4529
4530 if (priv->fw_state == FW_STATE_ERROR) {
4531 mwl8k_hw_reset(priv);
4532 goto unmap;
4533 }
4534
4166 ieee80211_stop_queues(hw); 4535 ieee80211_stop_queues(hw);
4167 4536
4168 ieee80211_unregister_hw(hw); 4537 ieee80211_unregister_hw(hw);
@@ -4185,6 +4554,7 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
4185 4554
4186 pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma); 4555 pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma);
4187 4556
4557unmap:
4188 pci_iounmap(pdev, priv->regs); 4558 pci_iounmap(pdev, priv->regs);
4189 pci_iounmap(pdev, priv->sram); 4559 pci_iounmap(pdev, priv->sram);
4190 pci_set_drvdata(pdev, NULL); 4560 pci_set_drvdata(pdev, NULL);
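The bulk of the mwl8k diff above converts probe-time firmware loading to the asynchronous request_firmware_nowait() interface, driven by a small state machine (FW_STATE_INIT -> LOADING_PREF -> LOADING_ALT / ERROR) plus a completion that mwl8k_remove() waits on so the device cannot be torn down mid-load. A stripped-down sketch of that pattern with hypothetical myfw_* names; the preferred/alternative image fallback and hardware download are reduced to comments.

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/completion.h>
#include <linux/device.h>

struct myfw_ctx {
	struct device *dev;
	const struct firmware *ucode;
	int fw_state;			/* FW_STATE_* style flag */
	struct completion fw_done;
};

static void myfw_loaded(const struct firmware *fw, void *context)
{
	struct myfw_ctx *ctx = context;

	if (!fw) {
		ctx->fw_state = -1;	/* userspace could not supply the image */
		complete(&ctx->fw_done);
		return;
	}
	ctx->ucode = fw;
	/* ... download to hardware, finish registration, etc. ... */
	complete(&ctx->fw_done);
}

static int myfw_start_load(struct myfw_ctx *ctx, const char *name)
{
	ctx->ucode = NULL;
	init_completion(&ctx->fw_done);
	/* returns immediately; myfw_loaded() runs later in process context */
	return request_firmware_nowait(THIS_MODULE, 1, name, ctx->dev,
				       GFP_KERNEL, ctx, myfw_loaded);
}

static void myfw_remove(struct myfw_ctx *ctx)
{
	/* mirror mwl8k_remove(): never race teardown with the async load */
	wait_for_completion(&ctx->fw_done);
	release_firmware(ctx->ucode);
}

mwl8k additionally keeps a preferred and an alternative image (STA versus AP firmware, selectable via the new ap_mode_default module parameter) and retries with the alternative from inside the callback, which is what the FW_STATE_LOADING_ALT state covers.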
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 97007d9e2c1..2b1cbba90a8 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2286,8 +2286,8 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2286 struct ethhdr *peth; 2286 struct ethhdr *peth;
2287 UCHAR srcaddr[ADDRLEN]; 2287 UCHAR srcaddr[ADDRLEN];
2288 UCHAR destaddr[ADDRLEN]; 2288 UCHAR destaddr[ADDRLEN];
2289 static UCHAR org_bridge[3] = { 0, 0, 0xf8 }; 2289 static const UCHAR org_bridge[3] = { 0, 0, 0xf8 };
2290 static UCHAR org_1042[3] = { 0, 0, 0 }; 2290 static const UCHAR org_1042[3] = { 0, 0, 0 };
2291 2291
2292 memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN); 2292 memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN);
2293 memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN); 2293 memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 71b5971da59..19f3d568f70 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -156,6 +156,12 @@ MODULE_PARM_DESC(workaround_interval,
156#define RNDIS_STATUS_ADAPTER_NOT_OPEN cpu_to_le32(0xc0010012) 156#define RNDIS_STATUS_ADAPTER_NOT_OPEN cpu_to_le32(0xc0010012)
157 157
158 158
159/* Known device types */
160#define RNDIS_UNKNOWN 0
161#define RNDIS_BCM4320A 1
162#define RNDIS_BCM4320B 2
163
164
159/* NDIS data structures. Taken from wpa_supplicant driver_ndis.c 165/* NDIS data structures. Taken from wpa_supplicant driver_ndis.c
160 * slightly modified for datatype endianess, etc 166 * slightly modified for datatype endianess, etc
161 */ 167 */
@@ -478,6 +484,7 @@ struct rndis_wlan_private {
478 struct ieee80211_rate rates[ARRAY_SIZE(rndis_rates)]; 484 struct ieee80211_rate rates[ARRAY_SIZE(rndis_rates)];
479 u32 cipher_suites[ARRAY_SIZE(rndis_cipher_suites)]; 485 u32 cipher_suites[ARRAY_SIZE(rndis_cipher_suites)];
480 486
487 int device_type;
481 int caps; 488 int caps;
482 int multicast_size; 489 int multicast_size;
483 490
@@ -810,7 +817,8 @@ exit_unlock:
810 return ret; 817 return ret;
811} 818}
812 819
813static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len) 820static int rndis_set_oid(struct usbnet *dev, __le32 oid, const void *data,
821 int len)
814{ 822{
815 struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev); 823 struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
816 union { 824 union {
@@ -994,7 +1002,18 @@ static int level_to_qual(int level)
994 */ 1002 */
995static int set_infra_mode(struct usbnet *usbdev, int mode); 1003static int set_infra_mode(struct usbnet *usbdev, int mode);
996static void restore_keys(struct usbnet *usbdev); 1004static void restore_keys(struct usbnet *usbdev);
997static int rndis_check_bssid_list(struct usbnet *usbdev); 1005static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid,
1006 bool *matched);
1007
1008static int rndis_start_bssid_list_scan(struct usbnet *usbdev)
1009{
1010 __le32 tmp;
1011
1012 /* Note: OID_802_11_BSSID_LIST_SCAN clears internal BSS list. */
1013 tmp = cpu_to_le32(1);
1014 return rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
1015 sizeof(tmp));
1016}
998 1017
999static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid) 1018static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
1000{ 1019{
@@ -1015,7 +1034,7 @@ static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
1015 return ret; 1034 return ret;
1016} 1035}
1017 1036
1018static int set_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) 1037static int set_bssid(struct usbnet *usbdev, const u8 *bssid)
1019{ 1038{
1020 int ret; 1039 int ret;
1021 1040
@@ -1031,7 +1050,9 @@ static int set_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
1031 1050
1032static int clear_bssid(struct usbnet *usbdev) 1051static int clear_bssid(struct usbnet *usbdev)
1033{ 1052{
1034 u8 broadcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 1053 static const u8 broadcast_mac[ETH_ALEN] = {
1054 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1055 };
1035 1056
1036 return set_bssid(usbdev, broadcast_mac); 1057 return set_bssid(usbdev, broadcast_mac);
1037} 1058}
@@ -1904,14 +1925,14 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
1904 struct usbnet *usbdev = netdev_priv(dev); 1925 struct usbnet *usbdev = netdev_priv(dev);
1905 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 1926 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1906 int ret; 1927 int ret;
1907 __le32 tmp; 1928 int delay = SCAN_DELAY_JIFFIES;
1908 1929
1909 netdev_dbg(usbdev->net, "cfg80211.scan\n"); 1930 netdev_dbg(usbdev->net, "cfg80211.scan\n");
1910 1931
1911 /* Get current bssid list from device before new scan, as new scan 1932 /* Get current bssid list from device before new scan, as new scan
1912 * clears internal bssid list. 1933 * clears internal bssid list.
1913 */ 1934 */
1914 rndis_check_bssid_list(usbdev); 1935 rndis_check_bssid_list(usbdev, NULL, NULL);
1915 1936
1916 if (!request) 1937 if (!request)
1917 return -EINVAL; 1938 return -EINVAL;
@@ -1921,13 +1942,13 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
1921 1942
1922 priv->scan_request = request; 1943 priv->scan_request = request;
1923 1944
1924 tmp = cpu_to_le32(1); 1945 ret = rndis_start_bssid_list_scan(usbdev);
1925 ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
1926 sizeof(tmp));
1927 if (ret == 0) { 1946 if (ret == 0) {
1947 if (priv->device_type == RNDIS_BCM4320A)
1948 delay = HZ;
1949
1928 /* Wait before retrieving scan results from device */ 1950 /* Wait before retrieving scan results from device */
1929 queue_delayed_work(priv->workqueue, &priv->scan_work, 1951 queue_delayed_work(priv->workqueue, &priv->scan_work, delay);
1930 SCAN_DELAY_JIFFIES);
1931 } 1952 }
1932 1953
1933 return ret; 1954 return ret;
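
Taken together, the two scan hunks above give rndis_scan() the following shape: pull whatever the device already holds (the OID scan will erase it), remember the cfg80211 request, trigger the scan through the new helper, and only then schedule the delayed results fetch, with a longer settle time for the BCM4320a. A condensed sketch using only names from the patch (error handling and the early -EINVAL check trimmed):

	int delay = SCAN_DELAY_JIFFIES;

	rndis_check_bssid_list(usbdev, NULL, NULL);	/* save current list  */
	priv->scan_request = request;

	ret = rndis_start_bssid_list_scan(usbdev);	/* wipes device list  */
	if (ret == 0) {
		if (priv->device_type == RNDIS_BCM4320A)
			delay = HZ;			/* slower turnaround  */
		queue_delayed_work(priv->workqueue, &priv->scan_work, delay);
	}
	return ret;
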
@@ -1981,7 +2002,8 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
1981 GFP_KERNEL); 2002 GFP_KERNEL);
1982} 2003}
1983 2004
1984static int rndis_check_bssid_list(struct usbnet *usbdev) 2005static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid,
2006 bool *matched)
1985{ 2007{
1986 void *buf = NULL; 2008 void *buf = NULL;
1987 struct ndis_80211_bssid_list_ex *bssid_list; 2009 struct ndis_80211_bssid_list_ex *bssid_list;
@@ -2017,7 +2039,11 @@ resize_buf:
2017 count, len); 2039 count, len);
2018 2040
2019 while (count && ((void *)bssid + bssid_len) <= (buf + len)) { 2041 while (count && ((void *)bssid + bssid_len) <= (buf + len)) {
2020 rndis_bss_info_update(usbdev, bssid); 2042 if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
2043 matched) {
2044 if (compare_ether_addr(bssid->mac, match_bssid))
2045 *matched = true;
2046 }
2021 2047
2022 bssid = (void *)bssid + bssid_len; 2048 bssid = (void *)bssid + bssid_len;
2023 bssid_len = le32_to_cpu(bssid->length); 2049 bssid_len = le32_to_cpu(bssid->length);
@@ -2041,7 +2067,7 @@ static void rndis_get_scan_results(struct work_struct *work)
2041 if (!priv->scan_request) 2067 if (!priv->scan_request)
2042 return; 2068 return;
2043 2069
2044 ret = rndis_check_bssid_list(usbdev); 2070 ret = rndis_check_bssid_list(usbdev, NULL, NULL);
2045 2071
2046 cfg80211_scan_done(priv->scan_request, ret < 0); 2072 cfg80211_scan_done(priv->scan_request, ret < 0);
2047 2073
@@ -2495,6 +2521,91 @@ static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
2495 return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid)); 2521 return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid));
2496} 2522}
2497 2523
2524static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
2525 struct ndis_80211_assoc_info *info)
2526{
2527 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2528 struct ieee80211_channel *channel;
2529 struct ndis_80211_conf config;
2530 struct ndis_80211_ssid ssid;
2531 s32 signal;
2532 u64 timestamp;
2533 u16 capability;
2534 u16 beacon_interval;
2535 __le32 rssi;
2536 u8 ie_buf[34];
2537 int len, ret, ie_len;
2538
2539 /* Get signal quality, in case of error use rssi=0 and ignore error. */
2540 len = sizeof(rssi);
2541 rssi = 0;
2542 ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len);
2543 signal = level_to_qual(le32_to_cpu(rssi));
2544
2545 netdev_dbg(usbdev->net, "%s(): OID_802_11_RSSI -> %d, "
2546 "rssi:%d, qual: %d\n", __func__, ret, le32_to_cpu(rssi),
2547 level_to_qual(le32_to_cpu(rssi)));
2548
2549 /* Get AP capabilities */
2550 if (info) {
2551 capability = le16_to_cpu(info->resp_ie.capa);
2552 } else {
 2553		/* Set at least ESS/IBSS capability */
2554 capability = (priv->infra_mode == NDIS_80211_INFRA_INFRA) ?
2555 WLAN_CAPABILITY_ESS : WLAN_CAPABILITY_IBSS;
2556 }
2557
2558 /* Get channel and beacon interval */
2559 len = sizeof(config);
2560 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
2561 netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
2562 __func__, ret);
2563 if (ret >= 0) {
2564 beacon_interval = le16_to_cpu(config.beacon_period);
2565 channel = ieee80211_get_channel(priv->wdev.wiphy,
2566 KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
2567 if (!channel) {
2568 netdev_warn(usbdev->net, "%s(): could not get channel."
2569 "\n", __func__);
2570 return;
2571 }
2572 } else {
2573 netdev_warn(usbdev->net, "%s(): could not get configuration.\n",
2574 __func__);
2575 return;
2576 }
2577
2578 /* Get SSID, in case of error, use zero length SSID and ignore error. */
2579 len = sizeof(ssid);
2580 memset(&ssid, 0, sizeof(ssid));
2581 ret = rndis_query_oid(usbdev, OID_802_11_SSID, &ssid, &len);
2582 netdev_dbg(usbdev->net, "%s(): OID_802_11_SSID -> %d, len: %d, ssid: "
2583 "'%.32s'\n", __func__, ret,
2584 le32_to_cpu(ssid.length), ssid.essid);
2585
2586 if (le32_to_cpu(ssid.length) > 32)
2587 ssid.length = cpu_to_le32(32);
2588
2589 ie_buf[0] = WLAN_EID_SSID;
2590 ie_buf[1] = le32_to_cpu(ssid.length);
2591 memcpy(&ie_buf[2], ssid.essid, le32_to_cpu(ssid.length));
2592
2593 ie_len = le32_to_cpu(ssid.length) + 2;
2594
2595 /* no tsf */
2596 timestamp = 0;
2597
2598 netdev_dbg(usbdev->net, "%s(): channel:%d(freq), bssid:[%pM], tsf:%d, "
2599 "capa:%x, beacon int:%d, resp_ie(len:%d, essid:'%.32s'), "
2600 "signal:%d\n", __func__, (channel ? channel->center_freq : -1),
2601 bssid, (u32)timestamp, capability, beacon_interval, ie_len,
2602 ssid.essid, signal);
2603
2604 cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
2605 timestamp, capability, beacon_interval, ie_buf, ie_len,
2606 signal, GFP_KERNEL);
2607}
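
For readers unfamiliar with the element the new function builds by hand: ie_buf is a standard tag/length/value SSID information element, which is all cfg80211_inform_bss() needs alongside the channel, capability and beacon interval gathered above. A worked example with a made-up SSID:

	/* Hypothetical SSID "guest" (5 bytes):
	 *
	 *   ie_buf[] = { WLAN_EID_SSID (== 0), 5, 'g', 'u', 'e', 's', 't' }
	 *   ie_len   = 2 + 5 = 7
	 *
	 * The length is capped at 32 above, matching the 802.11 SSID limit.
	 */
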
2608
2498/* 2609/*
2499 * workers, indication handlers, device poller 2610 * workers, indication handlers, device poller
2500 */ 2611 */
@@ -2507,6 +2618,7 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2507 u8 *req_ie, *resp_ie; 2618 u8 *req_ie, *resp_ie;
2508 int ret, offset; 2619 int ret, offset;
2509 bool roamed = false; 2620 bool roamed = false;
2621 bool match_bss;
2510 2622
2511 if (priv->infra_mode == NDIS_80211_INFRA_INFRA && priv->connected) { 2623 if (priv->infra_mode == NDIS_80211_INFRA_INFRA && priv->connected) {
2512 /* received media connect indication while connected, either 2624 /* received media connect indication while connected, either
@@ -2558,6 +2670,13 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2558 resp_ie_len = 2670 resp_ie_len =
2559 CONTROL_BUFFER_SIZE - offset; 2671 CONTROL_BUFFER_SIZE - offset;
2560 } 2672 }
2673 } else {
2674 /* Since rndis_wlan_craft_connected_bss() might use info
2675 * later and expects info to contain valid data if
2676 * non-null, free info and set NULL here.
2677 */
2678 kfree(info);
2679 info = NULL;
2561 } 2680 }
2562 } else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC)) 2681 } else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC))
2563 return; 2682 return;
@@ -2569,13 +2688,26 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2569 netdev_dbg(usbdev->net, "link up work: [%pM]%s\n", 2688 netdev_dbg(usbdev->net, "link up work: [%pM]%s\n",
2570 bssid, roamed ? " roamed" : ""); 2689 bssid, roamed ? " roamed" : "");
2571 2690
2572 /* Internal bss list in device always contains at least the currently 2691 /* Internal bss list in device should contain at least the currently
2573 * connected bss and we can get it to cfg80211 with 2692 * connected bss and we can get it to cfg80211 with
2574 * rndis_check_bssid_list(). 2693 * rndis_check_bssid_list().
2575 * NOTE: This is true for Broadcom chip, but not mentioned in RNDIS 2694 *
2576 * spec. 2695 * NDIS spec says: "If the device is associated, but the associated
2696 * BSSID is not in its BSSID scan list, then the driver must add an
2697 * entry for the BSSID at the end of the data that it returns in
2698 * response to query of OID_802_11_BSSID_LIST."
2699 *
2700 * NOTE: Seems to be true for BCM4320b variant, but not BCM4320a.
2577 */ 2701 */
2578 rndis_check_bssid_list(usbdev); 2702 match_bss = false;
2703 rndis_check_bssid_list(usbdev, bssid, &match_bss);
2704
2705 if (!is_zero_ether_addr(bssid) && !match_bss) {
2706 /* Couldn't get bss from device, we need to manually craft bss
2707 * for cfg80211.
2708 */
2709 rndis_wlan_craft_connected_bss(usbdev, bssid, info);
2710 }
2579 2711
2580 if (priv->infra_mode == NDIS_80211_INFRA_INFRA) { 2712 if (priv->infra_mode == NDIS_80211_INFRA_INFRA) {
2581 if (!roamed) 2713 if (!roamed)
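
In short, the link-up handler now verifies that the device's BSSID list really contains the BSS it just associated to, and synthesises one when it does not (the BCM4320a case noted in the comment above). Condensed from the hunk:

	match_bss = false;
	rndis_check_bssid_list(usbdev, bssid, &match_bss);

	if (!is_zero_ether_addr(bssid) && !match_bss)
		/* device did not report the BSS; build a minimal entry from
		 * OID_802_11_RSSI/CONFIGURATION/SSID queries instead
		 */
		rndis_wlan_craft_connected_bss(usbdev, bssid, info);
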
@@ -2934,8 +3066,21 @@ static void rndis_device_poller(struct work_struct *work)
2934 * also polls device with rndis_command() and catches for media link 3066 * also polls device with rndis_command() and catches for media link
2935 * indications. 3067 * indications.
2936 */ 3068 */
2937 if (!is_associated(usbdev)) 3069 if (!is_associated(usbdev)) {
3070 /* Workaround bad scanning in BCM4320a devices with active
3071 * background scanning when not associated.
3072 */
3073 if (priv->device_type == RNDIS_BCM4320A && priv->radio_on &&
3074 !priv->scan_request) {
3075 /* Get previous scan results */
3076 rndis_check_bssid_list(usbdev, NULL, NULL);
3077
3078 /* Initiate new scan */
3079 rndis_start_bssid_list_scan(usbdev);
3080 }
3081
2938 goto end; 3082 goto end;
3083 }
2939 3084
2940 len = sizeof(rssi); 3085 len = sizeof(rssi);
2941 ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); 3086 ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len);
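
The poller hunk above effectively turns each unassociated poll pass into one background-scan round for the BCM4320a: publish the results collected since the previous pass, then immediately start the next scan. Assuming the poller re-queues itself at its usual interval (that part is outside this hunk), the cycle looks like:

	/* once per poller pass, only while not associated, radio on and no
	 * cfg80211 scan pending:
	 */
	rndis_check_bssid_list(usbdev, NULL, NULL);	/* publish last round */
	rndis_start_bssid_list_scan(usbdev);		/* kick off next one  */
	/* results are harvested on the following pass */
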
@@ -2992,10 +3137,12 @@ end:
2992/* 3137/*
2993 * driver/device initialization 3138 * driver/device initialization
2994 */ 3139 */
2995static void rndis_copy_module_params(struct usbnet *usbdev) 3140static void rndis_copy_module_params(struct usbnet *usbdev, int device_type)
2996{ 3141{
2997 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 3142 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2998 3143
3144 priv->device_type = device_type;
3145
2999 priv->param_country[0] = modparam_country[0]; 3146 priv->param_country[0] = modparam_country[0];
3000 priv->param_country[1] = modparam_country[1]; 3147 priv->param_country[1] = modparam_country[1];
3001 priv->param_country[2] = 0; 3148 priv->param_country[2] = 0;
@@ -3038,12 +3185,25 @@ static void rndis_copy_module_params(struct usbnet *usbdev)
3038 priv->param_workaround_interval = modparam_workaround_interval; 3185 priv->param_workaround_interval = modparam_workaround_interval;
3039} 3186}
3040 3187
3188static int unknown_early_init(struct usbnet *usbdev)
3189{
3190	/* copy module parameters for unknown devices so that iwconfig reports txpower
3191 * and workaround parameter is copied to private structure correctly.
3192 */
3193 rndis_copy_module_params(usbdev, RNDIS_UNKNOWN);
3194
3195	/* This is an unknown device, so do not try to set configuration parameters.
3196 */
3197
3198 return 0;
3199}
3200
3041static int bcm4320a_early_init(struct usbnet *usbdev) 3201static int bcm4320a_early_init(struct usbnet *usbdev)
3042{ 3202{
3043 /* copy module parameters for bcm4320a so that iwconfig reports txpower 3203 /* copy module parameters for bcm4320a so that iwconfig reports txpower
3044 * and workaround parameter is copied to private structure correctly. 3204 * and workaround parameter is copied to private structure correctly.
3045 */ 3205 */
3046 rndis_copy_module_params(usbdev); 3206 rndis_copy_module_params(usbdev, RNDIS_BCM4320A);
3047 3207
3048 /* bcm4320a doesn't handle configuration parameters well. Try 3208 /* bcm4320a doesn't handle configuration parameters well. Try
3049 * set any and you get partially zeroed mac and broken device. 3209 * set any and you get partially zeroed mac and broken device.
@@ -3057,7 +3217,7 @@ static int bcm4320b_early_init(struct usbnet *usbdev)
3057 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 3217 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
3058 char buf[8]; 3218 char buf[8];
3059 3219
3060 rndis_copy_module_params(usbdev); 3220 rndis_copy_module_params(usbdev, RNDIS_BCM4320B);
3061 3221
3062 /* Early initialization settings, setting these won't have effect 3222 /* Early initialization settings, setting these won't have effect
3063 * if called after generic_rndis_bind(). 3223 * if called after generic_rndis_bind().
@@ -3320,7 +3480,7 @@ static const struct driver_info rndis_wlan_info = {
3320 .tx_fixup = rndis_tx_fixup, 3480 .tx_fixup = rndis_tx_fixup,
3321 .reset = rndis_wlan_reset, 3481 .reset = rndis_wlan_reset,
3322 .stop = rndis_wlan_stop, 3482 .stop = rndis_wlan_stop,
3323 .early_init = bcm4320a_early_init, 3483 .early_init = unknown_early_init,
3324 .indication = rndis_wlan_indication, 3484 .indication = rndis_wlan_indication,
3325}; 3485};
3326 3486
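
The generic rndis_wlan_info, used for devices the USB id table cannot attribute to a specific chip, now goes through unknown_early_init() instead of assuming a BCM4320a. The net effect is that priv->device_type is always set by some early_init hook before the rest of the driver runs, and chip-specific behaviour is keyed off it from then on. A sketch of the consumer side (RNDIS_BCM4320A/B and RNDIS_UNKNOWN are the values passed to rndis_copy_module_params() above):

	/* anywhere later in the driver: */
	switch (priv->device_type) {
	case RNDIS_BCM4320A:
		/* BCM4320a quirks: longer scan delay, poller-driven scan */
		break;
	case RNDIS_BCM4320B:
	case RNDIS_UNKNOWN:
	default:
		/* default behaviour */
		break;
	}
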
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 4396d4b9bfb..6f383cd684b 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -53,51 +53,41 @@ config RT61PCI
53 53
54 When compiled as a module, this driver will be called rt61pci. 54 When compiled as a module, this driver will be called rt61pci.
55 55
56config RT2800PCI_PCI
57 boolean
58 depends on PCI
59 default y
60
61config RT2800PCI_SOC
62 boolean
63 depends on RALINK_RT288X || RALINK_RT305X
64 default y
65
66config RT2800PCI 56config RT2800PCI
67 tristate "Ralink rt28xx/rt30xx/rt35xx (PCI/PCIe/PCMCIA) support (EXPERIMENTAL)" 57 tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
68 depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL 58 depends on PCI || RALINK_RT288X || RALINK_RT305X
69 select RT2800_LIB 59 select RT2800_LIB
70 select RT2X00_LIB_PCI if RT2800PCI_PCI 60 select RT2X00_LIB_PCI if PCI
71 select RT2X00_LIB_SOC if RT2800PCI_SOC 61 select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X
72 select RT2X00_LIB_HT 62 select RT2X00_LIB_HT
73 select RT2X00_LIB_FIRMWARE 63 select RT2X00_LIB_FIRMWARE
74 select RT2X00_LIB_CRYPTO 64 select RT2X00_LIB_CRYPTO
75 select CRC_CCITT 65 select CRC_CCITT
76 select EEPROM_93CX6 66 select EEPROM_93CX6
77 ---help--- 67 ---help---
78 This adds support for rt2800/rt3000/rt3500 wireless chipset family. 68 This adds support for rt27xx/rt28xx/rt30xx wireless chipset family.
79 Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052 69 Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890, RT3052,
80 70 RT3090, RT3091 & RT3092
81 This driver is non-functional at the moment and is intended for
82 developers.
83 71
84 When compiled as a module, this driver will be called "rt2800pci.ko". 72 When compiled as a module, this driver will be called "rt2800pci.ko".
85 73
86if RT2800PCI 74if RT2800PCI
87 75
88config RT2800PCI_RT30XX 76config RT2800PCI_RT33XX
89 bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices" 77 bool "rt2800pci - Include support for rt33xx devices (EXPERIMENTAL)"
90 default y 78 depends on EXPERIMENTAL
79 default n
91 ---help--- 80 ---help---
92 This adds support for rt30xx wireless chipset family to the 81 This adds support for rt33xx wireless chipset family to the
93 rt2800pci driver. 82 rt2800pci driver.
94 Supported chips: RT3090, RT3091 & RT3092 83 Supported chips: RT3390
95 84
96 Support for these devices is non-functional at the moment and is 85 Support for these devices is non-functional at the moment and is
97 intended for testers and developers. 86 intended for testers and developers.
98 87
99config RT2800PCI_RT35XX 88config RT2800PCI_RT35XX
100 bool "rt2800pci - Include support for rt35xx (PCI/PCIe/PCMCIA) devices" 89 bool "rt2800pci - Include support for rt35xx devices (EXPERIMENTAL)"
90 depends on EXPERIMENTAL
101 default n 91 default n
102 ---help--- 92 ---help---
103 This adds support for rt35xx wireless chipset family to the 93 This adds support for rt35xx wireless chipset family to the
@@ -134,8 +124,8 @@ config RT73USB
134 When compiled as a module, this driver will be called rt73usb. 124 When compiled as a module, this driver will be called rt73usb.
135 125
136config RT2800USB 126config RT2800USB
137 tristate "Ralink rt2800 (USB) support (EXPERIMENTAL)" 127 tristate "Ralink rt27xx/rt28xx/rt30xx (USB) support"
138 depends on USB && EXPERIMENTAL 128 depends on USB
139 select RT2800_LIB 129 select RT2800_LIB
140 select RT2X00_LIB_USB 130 select RT2X00_LIB_USB
141 select RT2X00_LIB_HT 131 select RT2X00_LIB_HT
@@ -143,30 +133,28 @@ config RT2800USB
143 select RT2X00_LIB_CRYPTO 133 select RT2X00_LIB_CRYPTO
144 select CRC_CCITT 134 select CRC_CCITT
145 ---help--- 135 ---help---
146 This adds experimental support for rt2800 wireless chipset family. 136 This adds support for rt27xx/rt28xx/rt30xx wireless chipset family.
147 Supported chips: RT2770, RT2870 & RT3070. 137 Supported chips: RT2770, RT2870 & RT3070, RT3071 & RT3072
148
149 Known issues:
150 - support for RT2870 chips doesn't work with 802.11n APs yet
151 - support for RT3070 chips is non-functional at the moment
152 138
153 When compiled as a module, this driver will be called "rt2800usb.ko". 139 When compiled as a module, this driver will be called "rt2800usb.ko".
154 140
155if RT2800USB 141if RT2800USB
156 142
157config RT2800USB_RT30XX 143config RT2800USB_RT33XX
158 bool "rt2800usb - Include support for rt30xx (USB) devices" 144 bool "rt2800usb - Include support for rt33xx devices (EXPERIMENTAL)"
159 default y 145 depends on EXPERIMENTAL
146 default n
160 ---help--- 147 ---help---
161 This adds support for rt30xx wireless chipset family to the 148 This adds support for rt33xx wireless chipset family to the
162 rt2800usb driver. 149 rt2800usb driver.
163 Supported chips: RT3070, RT3071 & RT3072 150 Supported chips: RT3370
164 151
165 Support for these devices is non-functional at the moment and is 152 Support for these devices is non-functional at the moment and is
166 intended for testers and developers. 153 intended for testers and developers.
167 154
168config RT2800USB_RT35XX 155config RT2800USB_RT35XX
169 bool "rt2800usb - Include support for rt35xx (USB) devices" 156 bool "rt2800usb - Include support for rt35xx devices (EXPERIMENTAL)"
157 depends on EXPERIMENTAL
170 default n 158 default n
171 ---help--- 159 ---help---
172 This adds support for rt35xx wireless chipset family to the 160 This adds support for rt35xx wireless chipset family to the
@@ -180,9 +168,9 @@ config RT2800USB_UNKNOWN
180 bool "rt2800usb - Include support for unknown (USB) devices" 168 bool "rt2800usb - Include support for unknown (USB) devices"
181 default n 169 default n
182 ---help--- 170 ---help---
183 This adds support for rt2800 family devices that are known to 171 This adds support for rt2800usb devices that are known to
184 have a rt2800 family chipset, but for which the exact chipset 172 have a rt28xx family compatible chipset, but for which the exact
185 is unknown. 173 chipset is unknown.
186 174
187 Support status for these devices is unknown, and enabling these 175 Support status for these devices is unknown, and enabling these
188 devices may or may not work. 176 devices may or may not work.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 4f420a9ec5d..9ec6691adf0 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -885,8 +885,7 @@ static void rt2400pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
885 885
886 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg); 886 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
887 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 887 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX,
888 (state == STATE_RADIO_RX_OFF) || 888 (state == STATE_RADIO_RX_OFF));
889 (state == STATE_RADIO_RX_OFF_LINK));
890 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 889 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
891} 890}
892 891
@@ -989,9 +988,7 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
989 rt2400pci_disable_radio(rt2x00dev); 988 rt2400pci_disable_radio(rt2x00dev);
990 break; 989 break;
991 case STATE_RADIO_RX_ON: 990 case STATE_RADIO_RX_ON:
992 case STATE_RADIO_RX_ON_LINK:
993 case STATE_RADIO_RX_OFF: 991 case STATE_RADIO_RX_OFF:
994 case STATE_RADIO_RX_OFF_LINK:
995 rt2400pci_toggle_rx(rt2x00dev, state); 992 rt2400pci_toggle_rx(rt2x00dev, state);
996 break; 993 break;
997 case STATE_RADIO_IRQ_ON: 994 case STATE_RADIO_IRQ_ON:
@@ -1612,6 +1609,7 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
1612 .get_tsf = rt2400pci_get_tsf, 1609 .get_tsf = rt2400pci_get_tsf,
1613 .tx_last_beacon = rt2400pci_tx_last_beacon, 1610 .tx_last_beacon = rt2400pci_tx_last_beacon,
1614 .rfkill_poll = rt2x00mac_rfkill_poll, 1611 .rfkill_poll = rt2x00mac_rfkill_poll,
1612 .flush = rt2x00mac_flush,
1615}; 1613};
1616 1614
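
The .flush addition here (and the identical ones in rt2500pci, rt2500usb and rt2800pci below) wires the drivers up to mac80211's flush callback, which in this kernel generation has the shape:

	void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop);

mac80211 calls it before operations that must not race with queued frames (scans, channel changes, suspend). The shared rt2x00mac_flush() implementation lives in rt2x00mac.c, which is not part of this diff; it is expected to wait for the TX queues to drain, or drop pending frames when asked.
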
1617static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { 1615static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
@@ -1640,28 +1638,28 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1640}; 1638};
1641 1639
1642static const struct data_queue_desc rt2400pci_queue_rx = { 1640static const struct data_queue_desc rt2400pci_queue_rx = {
1643 .entry_num = RX_ENTRIES, 1641 .entry_num = 24,
1644 .data_size = DATA_FRAME_SIZE, 1642 .data_size = DATA_FRAME_SIZE,
1645 .desc_size = RXD_DESC_SIZE, 1643 .desc_size = RXD_DESC_SIZE,
1646 .priv_size = sizeof(struct queue_entry_priv_pci), 1644 .priv_size = sizeof(struct queue_entry_priv_pci),
1647}; 1645};
1648 1646
1649static const struct data_queue_desc rt2400pci_queue_tx = { 1647static const struct data_queue_desc rt2400pci_queue_tx = {
1650 .entry_num = TX_ENTRIES, 1648 .entry_num = 24,
1651 .data_size = DATA_FRAME_SIZE, 1649 .data_size = DATA_FRAME_SIZE,
1652 .desc_size = TXD_DESC_SIZE, 1650 .desc_size = TXD_DESC_SIZE,
1653 .priv_size = sizeof(struct queue_entry_priv_pci), 1651 .priv_size = sizeof(struct queue_entry_priv_pci),
1654}; 1652};
1655 1653
1656static const struct data_queue_desc rt2400pci_queue_bcn = { 1654static const struct data_queue_desc rt2400pci_queue_bcn = {
1657 .entry_num = BEACON_ENTRIES, 1655 .entry_num = 1,
1658 .data_size = MGMT_FRAME_SIZE, 1656 .data_size = MGMT_FRAME_SIZE,
1659 .desc_size = TXD_DESC_SIZE, 1657 .desc_size = TXD_DESC_SIZE,
1660 .priv_size = sizeof(struct queue_entry_priv_pci), 1658 .priv_size = sizeof(struct queue_entry_priv_pci),
1661}; 1659};
1662 1660
1663static const struct data_queue_desc rt2400pci_queue_atim = { 1661static const struct data_queue_desc rt2400pci_queue_atim = {
1664 .entry_num = ATIM_ENTRIES, 1662 .entry_num = 8,
1665 .data_size = DATA_FRAME_SIZE, 1663 .data_size = DATA_FRAME_SIZE,
1666 .desc_size = TXD_DESC_SIZE, 1664 .desc_size = TXD_DESC_SIZE,
1667 .priv_size = sizeof(struct queue_entry_priv_pci), 1665 .priv_size = sizeof(struct queue_entry_priv_pci),
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index c048b18f413..d3a4a68cc43 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -809,8 +809,8 @@
809/* 809/*
810 * DMA descriptor defines. 810 * DMA descriptor defines.
811 */ 811 */
812#define TXD_DESC_SIZE ( 8 * sizeof(__le32) ) 812#define TXD_DESC_SIZE (8 * sizeof(__le32))
813#define RXD_DESC_SIZE ( 8 * sizeof(__le32) ) 813#define RXD_DESC_SIZE (8 * sizeof(__le32))
814 814
815/* 815/*
816 * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. 816 * TX descriptor format for TX, PRIO, ATIM and Beacon Ring.
@@ -948,6 +948,6 @@
948 ((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER) 948 ((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER)
949 949
950#define TXPOWER_TO_DEV(__txpower) \ 950#define TXPOWER_TO_DEV(__txpower) \
951 MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER) 951 (MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER))
952 952
953#endif /* RT2400PCI_H */ 953#endif /* RT2400PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 97feb7aef80..3e7f2034624 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1040,8 +1040,7 @@ static void rt2500pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
1040 1040
1041 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg); 1041 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
1042 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1042 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX,
1043 (state == STATE_RADIO_RX_OFF) || 1043 (state == STATE_RADIO_RX_OFF));
1044 (state == STATE_RADIO_RX_OFF_LINK));
1045 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 1044 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
1046} 1045}
1047 1046
@@ -1144,9 +1143,7 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1144 rt2500pci_disable_radio(rt2x00dev); 1143 rt2500pci_disable_radio(rt2x00dev);
1145 break; 1144 break;
1146 case STATE_RADIO_RX_ON: 1145 case STATE_RADIO_RX_ON:
1147 case STATE_RADIO_RX_ON_LINK:
1148 case STATE_RADIO_RX_OFF: 1146 case STATE_RADIO_RX_OFF:
1149 case STATE_RADIO_RX_OFF_LINK:
1150 rt2500pci_toggle_rx(rt2x00dev, state); 1147 rt2500pci_toggle_rx(rt2x00dev, state);
1151 break; 1148 break;
1152 case STATE_RADIO_IRQ_ON: 1149 case STATE_RADIO_IRQ_ON:
@@ -1193,9 +1190,9 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
1193 1190
1194 rt2x00_desc_read(txd, 2, &word); 1191 rt2x00_desc_read(txd, 2, &word);
1195 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER); 1192 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
1196 rt2x00_set_field32(&word, TXD_W2_AIFS, txdesc->aifs); 1193 rt2x00_set_field32(&word, TXD_W2_AIFS, entry->queue->aifs);
1197 rt2x00_set_field32(&word, TXD_W2_CWMIN, txdesc->cw_min); 1194 rt2x00_set_field32(&word, TXD_W2_CWMIN, entry->queue->cw_min);
1198 rt2x00_set_field32(&word, TXD_W2_CWMAX, txdesc->cw_max); 1195 rt2x00_set_field32(&word, TXD_W2_CWMAX, entry->queue->cw_max);
1199 rt2x00_desc_write(txd, 2, word); 1196 rt2x00_desc_write(txd, 2, word);
1200 1197
1201 rt2x00_desc_read(txd, 3, &word); 1198 rt2x00_desc_read(txd, 3, &word);
@@ -1909,6 +1906,7 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
1909 .get_tsf = rt2500pci_get_tsf, 1906 .get_tsf = rt2500pci_get_tsf,
1910 .tx_last_beacon = rt2500pci_tx_last_beacon, 1907 .tx_last_beacon = rt2500pci_tx_last_beacon,
1911 .rfkill_poll = rt2x00mac_rfkill_poll, 1908 .rfkill_poll = rt2x00mac_rfkill_poll,
1909 .flush = rt2x00mac_flush,
1912}; 1910};
1913 1911
1914static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { 1912static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
@@ -1937,28 +1935,28 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
1937}; 1935};
1938 1936
1939static const struct data_queue_desc rt2500pci_queue_rx = { 1937static const struct data_queue_desc rt2500pci_queue_rx = {
1940 .entry_num = RX_ENTRIES, 1938 .entry_num = 32,
1941 .data_size = DATA_FRAME_SIZE, 1939 .data_size = DATA_FRAME_SIZE,
1942 .desc_size = RXD_DESC_SIZE, 1940 .desc_size = RXD_DESC_SIZE,
1943 .priv_size = sizeof(struct queue_entry_priv_pci), 1941 .priv_size = sizeof(struct queue_entry_priv_pci),
1944}; 1942};
1945 1943
1946static const struct data_queue_desc rt2500pci_queue_tx = { 1944static const struct data_queue_desc rt2500pci_queue_tx = {
1947 .entry_num = TX_ENTRIES, 1945 .entry_num = 32,
1948 .data_size = DATA_FRAME_SIZE, 1946 .data_size = DATA_FRAME_SIZE,
1949 .desc_size = TXD_DESC_SIZE, 1947 .desc_size = TXD_DESC_SIZE,
1950 .priv_size = sizeof(struct queue_entry_priv_pci), 1948 .priv_size = sizeof(struct queue_entry_priv_pci),
1951}; 1949};
1952 1950
1953static const struct data_queue_desc rt2500pci_queue_bcn = { 1951static const struct data_queue_desc rt2500pci_queue_bcn = {
1954 .entry_num = BEACON_ENTRIES, 1952 .entry_num = 1,
1955 .data_size = MGMT_FRAME_SIZE, 1953 .data_size = MGMT_FRAME_SIZE,
1956 .desc_size = TXD_DESC_SIZE, 1954 .desc_size = TXD_DESC_SIZE,
1957 .priv_size = sizeof(struct queue_entry_priv_pci), 1955 .priv_size = sizeof(struct queue_entry_priv_pci),
1958}; 1956};
1959 1957
1960static const struct data_queue_desc rt2500pci_queue_atim = { 1958static const struct data_queue_desc rt2500pci_queue_atim = {
1961 .entry_num = ATIM_ENTRIES, 1959 .entry_num = 8,
1962 .data_size = DATA_FRAME_SIZE, 1960 .data_size = DATA_FRAME_SIZE,
1963 .desc_size = TXD_DESC_SIZE, 1961 .desc_size = TXD_DESC_SIZE,
1964 .priv_size = sizeof(struct queue_entry_priv_pci), 1962 .priv_size = sizeof(struct queue_entry_priv_pci),
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index d708031361a..2aad7ba8a10 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -1088,8 +1088,8 @@
1088/* 1088/*
1089 * DMA descriptor defines. 1089 * DMA descriptor defines.
1090 */ 1090 */
1091#define TXD_DESC_SIZE ( 11 * sizeof(__le32) ) 1091#define TXD_DESC_SIZE (11 * sizeof(__le32))
1092#define RXD_DESC_SIZE ( 11 * sizeof(__le32) ) 1092#define RXD_DESC_SIZE (11 * sizeof(__le32))
1093 1093
1094/* 1094/*
1095 * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. 1095 * TX descriptor format for TX, PRIO, ATIM and Beacon Ring.
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 93e44c7f3a7..8152fec3175 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -39,7 +39,7 @@
39/* 39/*
40 * Allow hardware encryption to be disabled. 40 * Allow hardware encryption to be disabled.
41 */ 41 */
42static int modparam_nohwcrypt = 0; 42static int modparam_nohwcrypt;
43module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 43module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
44MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 44MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
45 45
@@ -938,8 +938,7 @@ static void rt2500usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
938 938
939 rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg); 939 rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
940 rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 940 rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX,
941 (state == STATE_RADIO_RX_OFF) || 941 (state == STATE_RADIO_RX_OFF));
942 (state == STATE_RADIO_RX_OFF_LINK));
943 rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); 942 rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
944} 943}
945 944
@@ -1019,9 +1018,7 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1019 rt2500usb_disable_radio(rt2x00dev); 1018 rt2500usb_disable_radio(rt2x00dev);
1020 break; 1019 break;
1021 case STATE_RADIO_RX_ON: 1020 case STATE_RADIO_RX_ON:
1022 case STATE_RADIO_RX_ON_LINK:
1023 case STATE_RADIO_RX_OFF: 1021 case STATE_RADIO_RX_OFF:
1024 case STATE_RADIO_RX_OFF_LINK:
1025 rt2500usb_toggle_rx(rt2x00dev, state); 1022 rt2500usb_toggle_rx(rt2x00dev, state);
1026 break; 1023 break;
1027 case STATE_RADIO_IRQ_ON: 1024 case STATE_RADIO_IRQ_ON:
@@ -1081,9 +1078,9 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
1081 1078
1082 rt2x00_desc_read(txd, 1, &word); 1079 rt2x00_desc_read(txd, 1, &word);
1083 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); 1080 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1084 rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs); 1081 rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs);
1085 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1082 rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
1086 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1083 rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
1087 rt2x00_desc_write(txd, 1, word); 1084 rt2x00_desc_write(txd, 1, word);
1088 1085
1089 rt2x00_desc_read(txd, 2, &word); 1086 rt2x00_desc_read(txd, 2, &word);
@@ -1801,6 +1798,7 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
1801 .bss_info_changed = rt2x00mac_bss_info_changed, 1798 .bss_info_changed = rt2x00mac_bss_info_changed,
1802 .conf_tx = rt2x00mac_conf_tx, 1799 .conf_tx = rt2x00mac_conf_tx,
1803 .rfkill_poll = rt2x00mac_rfkill_poll, 1800 .rfkill_poll = rt2x00mac_rfkill_poll,
1801 .flush = rt2x00mac_flush,
1804}; 1802};
1805 1803
1806static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { 1804static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
@@ -1829,28 +1827,28 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1829}; 1827};
1830 1828
1831static const struct data_queue_desc rt2500usb_queue_rx = { 1829static const struct data_queue_desc rt2500usb_queue_rx = {
1832 .entry_num = RX_ENTRIES, 1830 .entry_num = 32,
1833 .data_size = DATA_FRAME_SIZE, 1831 .data_size = DATA_FRAME_SIZE,
1834 .desc_size = RXD_DESC_SIZE, 1832 .desc_size = RXD_DESC_SIZE,
1835 .priv_size = sizeof(struct queue_entry_priv_usb), 1833 .priv_size = sizeof(struct queue_entry_priv_usb),
1836}; 1834};
1837 1835
1838static const struct data_queue_desc rt2500usb_queue_tx = { 1836static const struct data_queue_desc rt2500usb_queue_tx = {
1839 .entry_num = TX_ENTRIES, 1837 .entry_num = 32,
1840 .data_size = DATA_FRAME_SIZE, 1838 .data_size = DATA_FRAME_SIZE,
1841 .desc_size = TXD_DESC_SIZE, 1839 .desc_size = TXD_DESC_SIZE,
1842 .priv_size = sizeof(struct queue_entry_priv_usb), 1840 .priv_size = sizeof(struct queue_entry_priv_usb),
1843}; 1841};
1844 1842
1845static const struct data_queue_desc rt2500usb_queue_bcn = { 1843static const struct data_queue_desc rt2500usb_queue_bcn = {
1846 .entry_num = BEACON_ENTRIES, 1844 .entry_num = 1,
1847 .data_size = MGMT_FRAME_SIZE, 1845 .data_size = MGMT_FRAME_SIZE,
1848 .desc_size = TXD_DESC_SIZE, 1846 .desc_size = TXD_DESC_SIZE,
1849 .priv_size = sizeof(struct queue_entry_priv_usb_bcn), 1847 .priv_size = sizeof(struct queue_entry_priv_usb_bcn),
1850}; 1848};
1851 1849
1852static const struct data_queue_desc rt2500usb_queue_atim = { 1850static const struct data_queue_desc rt2500usb_queue_atim = {
1853 .entry_num = ATIM_ENTRIES, 1851 .entry_num = 8,
1854 .data_size = DATA_FRAME_SIZE, 1852 .data_size = DATA_FRAME_SIZE,
1855 .desc_size = TXD_DESC_SIZE, 1853 .desc_size = TXD_DESC_SIZE,
1856 .priv_size = sizeof(struct queue_entry_priv_usb), 1854 .priv_size = sizeof(struct queue_entry_priv_usb),
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index eb8b6cab992..a81c4371835 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -47,6 +47,7 @@
47 * RF3021 2.4G 1T2R 47 * RF3021 2.4G 1T2R
48 * RF3022 2.4G 2T2R 48 * RF3022 2.4G 2T2R
49 * RF3052 2.4G 2T2R 49 * RF3052 2.4G 2T2R
50 * RF3320 2.4G 1T1R
50 */ 51 */
51#define RF2820 0x0001 52#define RF2820 0x0001
52#define RF2850 0x0002 53#define RF2850 0x0002
@@ -412,10 +413,22 @@
412#define BCN_OFFSET1_BCN7 FIELD32(0xff000000) 413#define BCN_OFFSET1_BCN7 FIELD32(0xff000000)
413 414
414/* 415/*
415 * PBF registers 416 * TXRXQ_PCNT: PBF register
416 * Most are for debug. Driver doesn't touch PBF register. 417 * PCNT_TX0Q: Page count for TX hardware queue 0
418 * PCNT_TX1Q: Page count for TX hardware queue 1
419 * PCNT_TX2Q: Page count for TX hardware queue 2
420 * PCNT_RX0Q: Page count for RX hardware queue
417 */ 421 */
418#define TXRXQ_PCNT 0x0438 422#define TXRXQ_PCNT 0x0438
423#define TXRXQ_PCNT_TX0Q FIELD32(0x000000ff)
424#define TXRXQ_PCNT_TX1Q FIELD32(0x0000ff00)
425#define TXRXQ_PCNT_TX2Q FIELD32(0x00ff0000)
426#define TXRXQ_PCNT_RX0Q FIELD32(0xff000000)
427
428/*
429 * PBF register
430 * Debug. Driver doesn't touch PBF register.
431 */
419#define PBF_DBG 0x043c 432#define PBF_DBG 0x043c
420 433
421/* 434/*
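
The four new TXRXQ_PCNT field definitions make the PBF page counters readable with the standard accessors; a debug-only sketch (not part of the patch) of how a driver could dump them:

	u32 reg;

	rt2800_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
	printk(KERN_DEBUG "pbf pages: tx0=%u tx1=%u tx2=%u rx0=%u\n",
	       rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q),
	       rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q),
	       rt2x00_get_field32(reg, TXRXQ_PCNT_TX2Q),
	       rt2x00_get_field32(reg, TXRXQ_PCNT_RX0Q));
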
@@ -960,8 +973,31 @@
960 973
961/* 974/*
962 * TXOP_CTRL_CFG: 975 * TXOP_CTRL_CFG:
976 * TIMEOUT_TRUN_EN: Enable/Disable TXOP timeout truncation
977 * AC_TRUN_EN: Enable/Disable truncation for AC change
978 * TXRATEGRP_TRUN_EN: Enable/Disable truncation for TX rate group change
979 * USER_MODE_TRUN_EN: Enable/Disable truncation for user TXOP mode
980 * MIMO_PS_TRUN_EN: Enable/Disable truncation for MIMO PS RTS/CTS
981 * RESERVED_TRUN_EN: Reserved
982 * LSIG_TXOP_EN: Enable/Disable L-SIG TXOP protection
 983 * EXT_CCA_EN: Enable/Disable extension channel CCA reference (Defer 40 MHz
984 * transmissions if extension CCA is clear).
985 * EXT_CCA_DLY: Extension CCA signal delay time (unit: us)
986 * EXT_CWMIN: CwMin for extension channel backoff
987 * 0: Disabled
988 *
963 */ 989 */
964#define TXOP_CTRL_CFG 0x1340 990#define TXOP_CTRL_CFG 0x1340
991#define TXOP_CTRL_CFG_TIMEOUT_TRUN_EN FIELD32(0x00000001)
992#define TXOP_CTRL_CFG_AC_TRUN_EN FIELD32(0x00000002)
993#define TXOP_CTRL_CFG_TXRATEGRP_TRUN_EN FIELD32(0x00000004)
994#define TXOP_CTRL_CFG_USER_MODE_TRUN_EN FIELD32(0x00000008)
995#define TXOP_CTRL_CFG_MIMO_PS_TRUN_EN FIELD32(0x00000010)
996#define TXOP_CTRL_CFG_RESERVED_TRUN_EN FIELD32(0x00000020)
997#define TXOP_CTRL_CFG_LSIG_TXOP_EN FIELD32(0x00000040)
998#define TXOP_CTRL_CFG_EXT_CCA_EN FIELD32(0x00000080)
999#define TXOP_CTRL_CFG_EXT_CCA_DLY FIELD32(0x0000ff00)
1000#define TXOP_CTRL_CFG_EXT_CWMIN FIELD32(0x000f0000)
965 1001
966/* 1002/*
967 * TX_RTS_CFG: 1003 * TX_RTS_CFG:
@@ -1485,17 +1521,17 @@
1485#define SHARED_KEY_MODE_BASE 0x7000 1521#define SHARED_KEY_MODE_BASE 0x7000
1486 1522
1487#define MAC_WCID_ENTRY(__idx) \ 1523#define MAC_WCID_ENTRY(__idx) \
1488 ( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) ) 1524 (MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)))
1489#define PAIRWISE_KEY_ENTRY(__idx) \ 1525#define PAIRWISE_KEY_ENTRY(__idx) \
1490 ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) ) 1526 (PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)))
1491#define MAC_IVEIV_ENTRY(__idx) \ 1527#define MAC_IVEIV_ENTRY(__idx) \
1492 ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) ) 1528 (MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)))
1493#define MAC_WCID_ATTR_ENTRY(__idx) \ 1529#define MAC_WCID_ATTR_ENTRY(__idx) \
1494 ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) ) 1530 (MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)))
1495#define SHARED_KEY_ENTRY(__idx) \ 1531#define SHARED_KEY_ENTRY(__idx) \
1496 ( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) ) 1532 (SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)))
1497#define SHARED_KEY_MODE_ENTRY(__idx) \ 1533#define SHARED_KEY_MODE_ENTRY(__idx) \
1498 ( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) ) 1534 (SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)))
1499 1535
1500struct mac_wcid_entry { 1536struct mac_wcid_entry {
1501 u8 mac[6]; 1537 u8 mac[6];
@@ -1635,9 +1671,9 @@ struct mac_iveiv_entry {
1635#define HW_BEACON_BASE7 0x5bc0 1671#define HW_BEACON_BASE7 0x5bc0
1636 1672
1637#define HW_BEACON_OFFSET(__index) \ 1673#define HW_BEACON_OFFSET(__index) \
1638 ( ((__index) < 4) ? ( HW_BEACON_BASE0 + (__index * 0x0200) ) : \ 1674 (((__index) < 4) ? (HW_BEACON_BASE0 + (__index * 0x0200)) : \
1639 (((__index) < 6) ? ( HW_BEACON_BASE4 + ((__index - 4) * 0x0200) ) : \ 1675 (((__index) < 6) ? (HW_BEACON_BASE4 + ((__index - 4) * 0x0200)) : \
1640 (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))) ) 1676 (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))))
1641 1677
1642/* 1678/*
1643 * BBP registers. 1679 * BBP registers.
@@ -1987,8 +2023,8 @@ struct mac_iveiv_entry {
1987/* 2023/*
1988 * DMA descriptor defines. 2024 * DMA descriptor defines.
1989 */ 2025 */
1990#define TXWI_DESC_SIZE ( 4 * sizeof(__le32) ) 2026#define TXWI_DESC_SIZE (4 * sizeof(__le32))
1991#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) ) 2027#define RXWI_DESC_SIZE (4 * sizeof(__le32))
1992 2028
1993/* 2029/*
1994 * TX WI structure 2030 * TX WI structure
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 5f00e00789d..75631614aba 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -277,13 +277,17 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
277 unsigned int i; 277 unsigned int i;
278 u32 reg; 278 u32 reg;
279 279
280 /*
281 * Some devices are really slow to respond here. Wait a whole second
282 * before timing out.
283 */
280 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 284 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
281 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 285 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
282 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) && 286 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
283 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY)) 287 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
284 return 0; 288 return 0;
285 289
286 msleep(1); 290 msleep(10);
287 } 291 }
288 292
289 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n"); 293 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
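
The comment added above is backed by simple arithmetic; assuming REGISTER_BUSY_COUNT is on the order of 100 (its value lives in the rt2x00 bus headers, not in this hunk):

	/*
	 *   before: 100 iterations * msleep(1)  ~  100 ms total budget
	 *   after:  100 iterations * msleep(10) ~ 1000 ms total budget
	 *
	 * i.e. the "whole second" the new comment promises, for devices that
	 * were observed to overrun the old budget.
	 */
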
@@ -483,7 +487,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
483 txdesc->key_idx : 0xff); 487 txdesc->key_idx : 0xff);
484 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, 488 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
485 txdesc->length); 489 txdesc->length);
486 rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, txdesc->qid); 490 rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, entry->queue->qid);
487 rt2x00_set_field32(&word, TXWI_W1_PACKETID_ENTRY, (entry->entry_idx % 3) + 1); 491 rt2x00_set_field32(&word, TXWI_W1_PACKETID_ENTRY, (entry->entry_idx % 3) + 1);
488 rt2x00_desc_write(txwi, 1, word); 492 rt2x00_desc_write(txwi, 1, word);
489 493
@@ -727,7 +731,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
727 * that the TX_STA_FIFO stack has a size of 16. We stick to our 731 * that the TX_STA_FIFO stack has a size of 16. We stick to our
728 * tx ring size for now. 732 * tx ring size for now.
729 */ 733 */
730 for (i = 0; i < TX_ENTRIES; i++) { 734 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
731 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg); 735 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
732 if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID)) 736 if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
733 break; 737 break;
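
Worth cross-checking against the queue changes elsewhere in this diff: rt2800pci's TX queue descriptor now sets entry_num = 64 (see the rt2800pci_queue_tx hunk further down), so the new loop bound still comfortably covers the 16-entry TX_STA_FIFO described in the comment; the only real change is that the bound no longer comes from the removed global TX_ENTRIES.

	/* loop bound after this series:
	 *   rt2x00dev->ops->tx->entry_num == 64 for rt2800pci (set below),
	 *   comfortably >= the 16-entry TX_STA_FIFO hardware stack
	 */
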
@@ -824,7 +828,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
824} 828}
825EXPORT_SYMBOL_GPL(rt2800_write_beacon); 829EXPORT_SYMBOL_GPL(rt2800_write_beacon);
826 830
827static void inline rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev, 831static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
828 unsigned int beacon_base) 832 unsigned int beacon_base)
829{ 833{
830 int i; 834 int i;
@@ -1144,6 +1148,7 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
1144 struct rt2x00intf_conf *conf, const unsigned int flags) 1148 struct rt2x00intf_conf *conf, const unsigned int flags)
1145{ 1149{
1146 u32 reg; 1150 u32 reg;
1151 bool update_bssid = false;
1147 1152
1148 if (flags & CONFIG_UPDATE_TYPE) { 1153 if (flags & CONFIG_UPDATE_TYPE) {
1149 /* 1154 /*
@@ -1173,6 +1178,16 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
1173 } 1178 }
1174 1179
1175 if (flags & CONFIG_UPDATE_MAC) { 1180 if (flags & CONFIG_UPDATE_MAC) {
1181 if (flags & CONFIG_UPDATE_TYPE &&
1182 conf->sync == TSF_SYNC_AP_NONE) {
1183 /*
1184 * The BSSID register has to be set to our own mac
1185 * address in AP mode.
1186 */
1187 memcpy(conf->bssid, conf->mac, sizeof(conf->mac));
1188 update_bssid = true;
1189 }
1190
1176 if (!is_zero_ether_addr((const u8 *)conf->mac)) { 1191 if (!is_zero_ether_addr((const u8 *)conf->mac)) {
1177 reg = le32_to_cpu(conf->mac[1]); 1192 reg = le32_to_cpu(conf->mac[1]);
1178 rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff); 1193 rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
@@ -1183,7 +1198,7 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
1183 conf->mac, sizeof(conf->mac)); 1198 conf->mac, sizeof(conf->mac));
1184 } 1199 }
1185 1200
1186 if (flags & CONFIG_UPDATE_BSSID) { 1201 if ((flags & CONFIG_UPDATE_BSSID) || update_bssid) {
1187 if (!is_zero_ether_addr((const u8 *)conf->bssid)) { 1202 if (!is_zero_ether_addr((const u8 *)conf->bssid)) {
1188 reg = le32_to_cpu(conf->bssid[1]); 1203 reg = le32_to_cpu(conf->bssid[1]);
1189 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3); 1204 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
@@ -1529,7 +1544,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1529 rt2x00_rf(rt2x00dev, RF3020) || 1544 rt2x00_rf(rt2x00dev, RF3020) ||
1530 rt2x00_rf(rt2x00dev, RF3021) || 1545 rt2x00_rf(rt2x00dev, RF3021) ||
1531 rt2x00_rf(rt2x00dev, RF3022) || 1546 rt2x00_rf(rt2x00dev, RF3022) ||
1532 rt2x00_rf(rt2x00dev, RF3052)) 1547 rt2x00_rf(rt2x00dev, RF3052) ||
1548 rt2x00_rf(rt2x00dev, RF3320))
1533 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); 1549 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
1534 else 1550 else
1535 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 1551 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
@@ -2097,7 +2113,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2097 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); 2113 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
2098 } 2114 }
2099 2115
2100 rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f); 2116 /*
2117 * The legacy driver also sets TXOP_CTRL_CFG_RESERVED_TRUN_EN to 1
2118 * although it is reserved.
2119 */
2120 rt2800_register_read(rt2x00dev, TXOP_CTRL_CFG, &reg);
2121 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TIMEOUT_TRUN_EN, 1);
2122 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_AC_TRUN_EN, 1);
2123 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TXRATEGRP_TRUN_EN, 1);
2124 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_USER_MODE_TRUN_EN, 1);
2125 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_MIMO_PS_TRUN_EN, 1);
2126 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_RESERVED_TRUN_EN, 1);
2127 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_LSIG_TXOP_EN, 0);
2128 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CCA_EN, 0);
2129 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CCA_DLY, 88);
2130 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CWMIN, 0);
2131 rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, reg);
2132
2101 rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002); 2133 rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);
2102 2134
2103 rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg); 2135 rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
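
A quick check that the field-by-field initialisation above is behaviour-preserving: packing the programmed values back into the register layout reproduces the magic constant that used to be written verbatim.

	/*
	 *   TRUN_EN bits [5:0]          all 1  -> 0x0000003f
	 *   LSIG_TXOP_EN, EXT_CCA_EN    0      -> 0x00000000
	 *   EXT_CCA_DLY [15:8]          88     -> 0x00005800
	 *   EXT_CWMIN  [19:16]          0      -> 0x00000000
	 *                               total  =  0x0000583f
	 *
	 * which matches the old rt2800_register_write(..., 0x0000583f); the
	 * difference is that unrelated bits are now preserved by the
	 * read-modify-write.
	 */
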
@@ -2134,7 +2166,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2134 SHARED_KEY_MODE_ENTRY(i), 0); 2166 SHARED_KEY_MODE_ENTRY(i), 0);
2135 2167
2136 for (i = 0; i < 256; i++) { 2168 for (i = 0; i < 256; i++) {
2137 u32 wcid[2] = { 0xffffffff, 0x00ffffff }; 2169 static const u32 wcid[2] = { 0xffffffff, 0x00ffffff };
2138 rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i), 2170 rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
2139 wcid, sizeof(wcid)); 2171 wcid, sizeof(wcid));
2140 2172
@@ -2981,7 +3013,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
2981 !rt2x00_rf(rt2x00dev, RF2020) && 3013 !rt2x00_rf(rt2x00dev, RF2020) &&
2982 !rt2x00_rf(rt2x00dev, RF3021) && 3014 !rt2x00_rf(rt2x00dev, RF3021) &&
2983 !rt2x00_rf(rt2x00dev, RF3022) && 3015 !rt2x00_rf(rt2x00dev, RF3022) &&
2984 !rt2x00_rf(rt2x00dev, RF3052)) { 3016 !rt2x00_rf(rt2x00dev, RF3052) &&
3017 !rt2x00_rf(rt2x00dev, RF3320)) {
2985 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 3018 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
2986 return -ENODEV; 3019 return -ENODEV;
2987 } 3020 }
@@ -3245,7 +3278,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3245 } else if (rt2x00_rf(rt2x00dev, RF3020) || 3278 } else if (rt2x00_rf(rt2x00dev, RF3020) ||
3246 rt2x00_rf(rt2x00dev, RF2020) || 3279 rt2x00_rf(rt2x00dev, RF2020) ||
3247 rt2x00_rf(rt2x00dev, RF3021) || 3280 rt2x00_rf(rt2x00dev, RF3021) ||
3248 rt2x00_rf(rt2x00dev, RF3022)) { 3281 rt2x00_rf(rt2x00dev, RF3022) ||
3282 rt2x00_rf(rt2x00dev, RF3320)) {
3249 spec->num_channels = 14; 3283 spec->num_channels = 14;
3250 spec->channels = rf_vals_3x; 3284 spec->channels = rf_vals_3x;
3251 } else if (rt2x00_rf(rt2x00dev, RF3052)) { 3285 } else if (rt2x00_rf(rt2x00dev, RF3052)) {
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index b2673953598..433c7f3ef83 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -84,20 +84,22 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
84 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 84 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
85} 85}
86 86
87#ifdef CONFIG_RT2800PCI_SOC 87#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
88static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 88static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
89{ 89{
90 u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */ 90 void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
91 91
92 memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE); 92 memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
93
94 iounmap(base_addr);
93} 95}
94#else 96#else
95static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 97static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
96{ 98{
97} 99}
98#endif /* CONFIG_RT2800PCI_SOC */ 100#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
99 101
100#ifdef CONFIG_RT2800PCI_PCI 102#ifdef CONFIG_PCI
101static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 103static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
102{ 104{
103 struct rt2x00_dev *rt2x00dev = eeprom->data; 105 struct rt2x00_dev *rt2x00dev = eeprom->data;
@@ -181,7 +183,7 @@ static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
181static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) 183static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
182{ 184{
183} 185}
184#endif /* CONFIG_RT2800PCI_PCI */ 186#endif /* CONFIG_PCI */
185 187
186/* 188/*
187 * Firmware functions 189 * Firmware functions
@@ -328,8 +330,7 @@ static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
328 330
329 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 331 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
330 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 332 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
331 (state == STATE_RADIO_RX_ON) || 333 (state == STATE_RADIO_RX_ON));
332 (state == STATE_RADIO_RX_ON_LINK));
333 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 334 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
334} 335}
335 336
@@ -442,7 +443,7 @@ static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
442 * if the device is booting and wasn't asleep it will return 443 * if the device is booting and wasn't asleep it will return
443 * failure when attempting to wakeup. 444 * failure when attempting to wakeup.
444 */ 445 */
445 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2); 446 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
446 447
447 if (state == STATE_AWAKE) { 448 if (state == STATE_AWAKE) {
448 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0); 449 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
@@ -477,9 +478,7 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
477 rt2800pci_set_state(rt2x00dev, STATE_SLEEP); 478 rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
478 break; 479 break;
479 case STATE_RADIO_RX_ON: 480 case STATE_RADIO_RX_ON:
480 case STATE_RADIO_RX_ON_LINK:
481 case STATE_RADIO_RX_OFF: 481 case STATE_RADIO_RX_OFF:
482 case STATE_RADIO_RX_OFF_LINK:
483 rt2800pci_toggle_rx(rt2x00dev, state); 482 rt2800pci_toggle_rx(rt2x00dev, state);
484 break; 483 break;
485 case STATE_RADIO_IRQ_ON: 484 case STATE_RADIO_IRQ_ON:
@@ -777,7 +776,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
777 * Since we have only one producer and one consumer we don't 776 * Since we have only one producer and one consumer we don't
778 * need to lock the kfifo. 777 * need to lock the kfifo.
779 */ 778 */
780 for (i = 0; i < TX_ENTRIES; i++) { 779 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
781 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &status); 780 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &status);
782 781
783 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID)) 782 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
@@ -943,6 +942,7 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
943 .get_tsf = rt2800_get_tsf, 942 .get_tsf = rt2800_get_tsf,
944 .rfkill_poll = rt2x00mac_rfkill_poll, 943 .rfkill_poll = rt2x00mac_rfkill_poll,
945 .ampdu_action = rt2800_ampdu_action, 944 .ampdu_action = rt2800_ampdu_action,
945 .flush = rt2x00mac_flush,
946}; 946};
947 947
948static const struct rt2800_ops rt2800pci_rt2800_ops = { 948static const struct rt2800_ops rt2800pci_rt2800_ops = {
@@ -991,21 +991,21 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
991}; 991};
992 992
993static const struct data_queue_desc rt2800pci_queue_rx = { 993static const struct data_queue_desc rt2800pci_queue_rx = {
994 .entry_num = RX_ENTRIES, 994 .entry_num = 128,
995 .data_size = AGGREGATION_SIZE, 995 .data_size = AGGREGATION_SIZE,
996 .desc_size = RXD_DESC_SIZE, 996 .desc_size = RXD_DESC_SIZE,
997 .priv_size = sizeof(struct queue_entry_priv_pci), 997 .priv_size = sizeof(struct queue_entry_priv_pci),
998}; 998};
999 999
1000static const struct data_queue_desc rt2800pci_queue_tx = { 1000static const struct data_queue_desc rt2800pci_queue_tx = {
1001 .entry_num = TX_ENTRIES, 1001 .entry_num = 64,
1002 .data_size = AGGREGATION_SIZE, 1002 .data_size = AGGREGATION_SIZE,
1003 .desc_size = TXD_DESC_SIZE, 1003 .desc_size = TXD_DESC_SIZE,
1004 .priv_size = sizeof(struct queue_entry_priv_pci), 1004 .priv_size = sizeof(struct queue_entry_priv_pci),
1005}; 1005};
1006 1006
1007static const struct data_queue_desc rt2800pci_queue_bcn = { 1007static const struct data_queue_desc rt2800pci_queue_bcn = {
1008 .entry_num = 8 * BEACON_ENTRIES, 1008 .entry_num = 8,
1009 .data_size = 0, /* No DMA required for beacons */ 1009 .data_size = 0, /* No DMA required for beacons */
1010 .desc_size = TXWI_DESC_SIZE, 1010 .desc_size = TXWI_DESC_SIZE,
1011 .priv_size = sizeof(struct queue_entry_priv_pci), 1011 .priv_size = sizeof(struct queue_entry_priv_pci),
@@ -1033,12 +1033,15 @@ static const struct rt2x00_ops rt2800pci_ops = {
1033/* 1033/*
1034 * RT2800pci module information. 1034 * RT2800pci module information.
1035 */ 1035 */
1036#ifdef CONFIG_RT2800PCI_PCI 1036#ifdef CONFIG_PCI
1037static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { 1037static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1038 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1038 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
1039 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1039 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
1040 { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1040 { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
1041 { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1041 { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
1042 { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
1043 { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
1044 { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
1042 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1045 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
1043 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1046 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
1044 { PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1047 { PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1046,12 +1049,10 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1046 { PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1049 { PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) },
1047 { PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1050 { PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) },
1048 { PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1051 { PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) },
1049 { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
1050#ifdef CONFIG_RT2800PCI_RT30XX
1051 { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
1052 { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
1053 { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
1054 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1052 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
1053 { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
1054#ifdef CONFIG_RT2800PCI_RT33XX
1055 { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
1055#endif 1056#endif
1056#ifdef CONFIG_RT2800PCI_RT35XX 1057#ifdef CONFIG_RT2800PCI_RT35XX
1057 { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1058 { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1062,19 +1063,19 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1062#endif 1063#endif
1063 { 0, } 1064 { 0, }
1064}; 1065};
1065#endif /* CONFIG_RT2800PCI_PCI */ 1066#endif /* CONFIG_PCI */
1066 1067
1067MODULE_AUTHOR(DRV_PROJECT); 1068MODULE_AUTHOR(DRV_PROJECT);
1068MODULE_VERSION(DRV_VERSION); 1069MODULE_VERSION(DRV_VERSION);
1069MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver."); 1070MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
1070MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards"); 1071MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
1071#ifdef CONFIG_RT2800PCI_PCI 1072#ifdef CONFIG_PCI
1072MODULE_FIRMWARE(FIRMWARE_RT2860); 1073MODULE_FIRMWARE(FIRMWARE_RT2860);
1073MODULE_DEVICE_TABLE(pci, rt2800pci_device_table); 1074MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
1074#endif /* CONFIG_RT2800PCI_PCI */ 1075#endif /* CONFIG_PCI */
1075MODULE_LICENSE("GPL"); 1076MODULE_LICENSE("GPL");
1076 1077
1077#ifdef CONFIG_RT2800PCI_SOC 1078#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
1078static int rt2800soc_probe(struct platform_device *pdev) 1079static int rt2800soc_probe(struct platform_device *pdev)
1079{ 1080{
1080 return rt2x00soc_probe(pdev, &rt2800pci_ops); 1081 return rt2x00soc_probe(pdev, &rt2800pci_ops);
@@ -1091,9 +1092,9 @@ static struct platform_driver rt2800soc_driver = {
1091 .suspend = rt2x00soc_suspend, 1092 .suspend = rt2x00soc_suspend,
1092 .resume = rt2x00soc_resume, 1093 .resume = rt2x00soc_resume,
1093}; 1094};
1094#endif /* CONFIG_RT2800PCI_SOC */ 1095#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
1095 1096
1096#ifdef CONFIG_RT2800PCI_PCI 1097#ifdef CONFIG_PCI
1097static struct pci_driver rt2800pci_driver = { 1098static struct pci_driver rt2800pci_driver = {
1098 .name = KBUILD_MODNAME, 1099 .name = KBUILD_MODNAME,
1099 .id_table = rt2800pci_device_table, 1100 .id_table = rt2800pci_device_table,
@@ -1102,21 +1103,21 @@ static struct pci_driver rt2800pci_driver = {
1102 .suspend = rt2x00pci_suspend, 1103 .suspend = rt2x00pci_suspend,
1103 .resume = rt2x00pci_resume, 1104 .resume = rt2x00pci_resume,
1104}; 1105};
1105#endif /* CONFIG_RT2800PCI_PCI */ 1106#endif /* CONFIG_PCI */
1106 1107
1107static int __init rt2800pci_init(void) 1108static int __init rt2800pci_init(void)
1108{ 1109{
1109 int ret = 0; 1110 int ret = 0;
1110 1111
1111#ifdef CONFIG_RT2800PCI_SOC 1112#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
1112 ret = platform_driver_register(&rt2800soc_driver); 1113 ret = platform_driver_register(&rt2800soc_driver);
1113 if (ret) 1114 if (ret)
1114 return ret; 1115 return ret;
1115#endif 1116#endif
1116#ifdef CONFIG_RT2800PCI_PCI 1117#ifdef CONFIG_PCI
1117 ret = pci_register_driver(&rt2800pci_driver); 1118 ret = pci_register_driver(&rt2800pci_driver);
1118 if (ret) { 1119 if (ret) {
1119#ifdef CONFIG_RT2800PCI_SOC 1120#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
1120 platform_driver_unregister(&rt2800soc_driver); 1121 platform_driver_unregister(&rt2800soc_driver);
1121#endif 1122#endif
1122 return ret; 1123 return ret;
@@ -1128,10 +1129,10 @@ static int __init rt2800pci_init(void)
1128 1129
1129static void __exit rt2800pci_exit(void) 1130static void __exit rt2800pci_exit(void)
1130{ 1131{
1131#ifdef CONFIG_RT2800PCI_PCI 1132#ifdef CONFIG_PCI
1132 pci_unregister_driver(&rt2800pci_driver); 1133 pci_unregister_driver(&rt2800pci_driver);
1133#endif 1134#endif
1134#ifdef CONFIG_RT2800PCI_SOC 1135#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
1135 platform_driver_unregister(&rt2800soc_driver); 1136 platform_driver_unregister(&rt2800soc_driver);
1136#endif 1137#endif
1137} 1138}
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index 5a8dda9b5b5..70e050d904c 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -38,10 +38,10 @@
38 * Queue register offset macros 38 * Queue register offset macros
39 */ 39 */
40#define TX_QUEUE_REG_OFFSET 0x10 40#define TX_QUEUE_REG_OFFSET 0x10
41#define TX_BASE_PTR(__x) TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET) 41#define TX_BASE_PTR(__x) (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
42#define TX_MAX_CNT(__x) TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET) 42#define TX_MAX_CNT(__x) (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
43#define TX_CTX_IDX(__x) TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET) 43#define TX_CTX_IDX(__x) (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
44#define TX_DTX_IDX(__x) TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET) 44#define TX_DTX_IDX(__x) (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
45 45
46/* 46/*
47 * 8051 firmware image. 47 * 8051 firmware image.
@@ -52,8 +52,8 @@
52/* 52/*
53 * DMA descriptor defines. 53 * DMA descriptor defines.
54 */ 54 */
55#define TXD_DESC_SIZE ( 4 * sizeof(__le32) ) 55#define TXD_DESC_SIZE (4 * sizeof(__le32))
56#define RXD_DESC_SIZE ( 4 * sizeof(__le32) ) 56#define RXD_DESC_SIZE (4 * sizeof(__le32))
57 57
58/* 58/*
59 * TX descriptor format for TX, PRIO and Beacon Ring. 59 * TX descriptor format for TX, PRIO and Beacon Ring.
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 3dff56ec195..935b76d3ce4 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -45,7 +45,7 @@
45/* 45/*
46 * Allow hardware encryption to be disabled. 46 * Allow hardware encryption to be disabled.
47 */ 47 */
48static int modparam_nohwcrypt = 0; 48static int modparam_nohwcrypt;
49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
51 51
@@ -114,8 +114,7 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
114 114
115 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 115 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
116 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 116 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
117 (state == STATE_RADIO_RX_ON) || 117 (state == STATE_RADIO_RX_ON));
118 (state == STATE_RADIO_RX_ON_LINK));
119 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 118 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
120} 119}
121 120
@@ -165,7 +164,8 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
165 * this limit so reduce the number to prevent errors. 164 * this limit so reduce the number to prevent errors.
166 */ 165 */
167 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT, 166 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT,
168 ((RX_ENTRIES * DATA_FRAME_SIZE) / 1024) - 3); 167 ((rt2x00dev->ops->rx->entry_num * DATA_FRAME_SIZE)
168 / 1024) - 3);
169 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1); 169 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
170 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1); 170 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
171 rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg); 171 rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg);
@@ -183,9 +183,9 @@ static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
183 enum dev_state state) 183 enum dev_state state)
184{ 184{
185 if (state == STATE_AWAKE) 185 if (state == STATE_AWAKE)
186 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0); 186 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 2);
187 else 187 else
188 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2); 188 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
189 189
190 return 0; 190 return 0;
191} 191}
@@ -215,9 +215,7 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
215 rt2800usb_set_state(rt2x00dev, STATE_SLEEP); 215 rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
216 break; 216 break;
217 case STATE_RADIO_RX_ON: 217 case STATE_RADIO_RX_ON:
218 case STATE_RADIO_RX_ON_LINK:
219 case STATE_RADIO_RX_OFF: 218 case STATE_RADIO_RX_OFF:
220 case STATE_RADIO_RX_OFF_LINK:
221 rt2800usb_toggle_rx(rt2x00dev, state); 219 rt2800usb_toggle_rx(rt2x00dev, state);
222 break; 220 break;
223 case STATE_RADIO_IRQ_ON: 221 case STATE_RADIO_IRQ_ON:
@@ -245,6 +243,49 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
245} 243}
246 244
247/* 245/*
246 * Watchdog handlers
247 */
248static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
249{
250 unsigned int i;
251 u32 reg;
252
253 rt2800_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
254 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) {
255 WARNING(rt2x00dev, "TX HW queue 0 timed out,"
256 " invoke forced kick");
257
258 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40012);
259
260 for (i = 0; i < 10; i++) {
261 udelay(10);
262 if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q))
263 break;
264 }
265
266 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
267 }
268
269 rt2800_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
270 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) {
271 WARNING(rt2x00dev, "TX HW queue 1 timed out,"
272 " invoke forced kick");
273
274 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf4000a);
275
276 for (i = 0; i < 10; i++) {
277 udelay(10);
278 if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q))
279 break;
280 }
281
282 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
283 }
284
285 rt2x00usb_watchdog(rt2x00dev);
286}
287
288/*
248 * TX descriptor initialization 289 * TX descriptor initialization
249 */ 290 */
250static __le32 *rt2800usb_get_txwi(struct queue_entry *entry) 291static __le32 *rt2800usb_get_txwi(struct queue_entry *entry)
@@ -266,8 +307,14 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
266 * Initialize TXINFO descriptor 307 * Initialize TXINFO descriptor
267 */ 308 */
268 rt2x00_desc_read(txi, 0, &word); 309 rt2x00_desc_read(txi, 0, &word);
310
311 /*
312 * The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is
313 * TXWI + 802.11 header + L2 pad + payload + pad,
314 * so need to decrease size of TXINFO and USB end pad.
315 */
269 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, 316 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
270 entry->skb->len - TXINFO_DESC_SIZE); 317 entry->skb->len - TXINFO_DESC_SIZE - 4);
271 rt2x00_set_field32(&word, TXINFO_W0_WIV, 318 rt2x00_set_field32(&word, TXINFO_W0_WIV,
272 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); 319 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
273 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); 320 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -285,22 +332,29 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
285 skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE; 332 skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
286} 333}
287 334
288/* 335static void rt2800usb_write_tx_data(struct queue_entry *entry,
289 * TX data initialization 336 struct txentry_desc *txdesc)
290 */
291static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
292{ 337{
293 int length; 338 u8 padding_len;
294 339
295 /* 340 /*
296 * The length _must_ include 4 bytes padding, 341 * pad(1~3 bytes) is added after each 802.11 payload.
297 * it should always be multiple of 4, 342 * USB end pad(4 bytes) is added at each USB bulk out packet end.
298 * but it must _not_ be a multiple of the USB packet size. 343 * TX frame format is :
344 * | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad |
345 * |<------------- tx_pkt_len ------------->|
299 */ 346 */
300 length = roundup(entry->skb->len + 4, 4); 347 rt2800_write_tx_data(entry, txdesc);
301 length += (4 * !(length % entry->queue->usb_maxpacket)); 348 padding_len = roundup(entry->skb->len + 4, 4) - entry->skb->len;
349 memset(skb_put(entry->skb, padding_len), 0, padding_len);
350}
302 351
303 return length; 352/*
353 * TX data initialization
354 */
355static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
356{
357 return entry->skb->len;
304} 358}
305 359
306/* 360/*
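Note (illustrative sketch, not part of the patch): since rt2800usb_write_tx_data() now appends the alignment pad and the 4-byte USB end pad itself, rt2800usb_get_tx_data_len() can simply return skb->len. The padding arithmetic it uses, roundup(len + 4, 4) - len, always yields 4 to 7 bytes. A minimal userspace sketch with made-up frame lengths:

	#include <stdio.h>

	/* same rounding the kernel's roundup() macro performs */
	#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

	int main(void)
	{
		unsigned int lens[] = { 60, 61, 62, 63, 64 };
		unsigned int i;

		for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
			unsigned int len = lens[i];
			/* alignment pad (0-3 bytes) plus 4-byte USB end pad */
			unsigned int pad = roundup(len + 4, 4) - len;

			printf("skb->len=%u -> pad=%u, bulk-out length=%u\n",
			       len, pad, len + pad);
		}
		return 0;
	}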
@@ -507,6 +561,7 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
507 .get_tsf = rt2800_get_tsf, 561 .get_tsf = rt2800_get_tsf,
508 .rfkill_poll = rt2x00mac_rfkill_poll, 562 .rfkill_poll = rt2x00mac_rfkill_poll,
509 .ampdu_action = rt2800_ampdu_action, 563 .ampdu_action = rt2800_ampdu_action,
564 .flush = rt2x00mac_flush,
510}; 565};
511 566
512static const struct rt2800_ops rt2800usb_rt2800_ops = { 567static const struct rt2800_ops rt2800usb_rt2800_ops = {
@@ -535,9 +590,9 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
535 .link_stats = rt2800_link_stats, 590 .link_stats = rt2800_link_stats,
536 .reset_tuner = rt2800_reset_tuner, 591 .reset_tuner = rt2800_reset_tuner,
537 .link_tuner = rt2800_link_tuner, 592 .link_tuner = rt2800_link_tuner,
538 .watchdog = rt2x00usb_watchdog, 593 .watchdog = rt2800usb_watchdog,
539 .write_tx_desc = rt2800usb_write_tx_desc, 594 .write_tx_desc = rt2800usb_write_tx_desc,
540 .write_tx_data = rt2800_write_tx_data, 595 .write_tx_data = rt2800usb_write_tx_data,
541 .write_beacon = rt2800_write_beacon, 596 .write_beacon = rt2800_write_beacon,
542 .get_tx_data_len = rt2800usb_get_tx_data_len, 597 .get_tx_data_len = rt2800usb_get_tx_data_len,
543 .kick_tx_queue = rt2x00usb_kick_tx_queue, 598 .kick_tx_queue = rt2x00usb_kick_tx_queue,
@@ -553,21 +608,21 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
553}; 608};
554 609
555static const struct data_queue_desc rt2800usb_queue_rx = { 610static const struct data_queue_desc rt2800usb_queue_rx = {
556 .entry_num = RX_ENTRIES, 611 .entry_num = 128,
557 .data_size = AGGREGATION_SIZE, 612 .data_size = AGGREGATION_SIZE,
558 .desc_size = RXINFO_DESC_SIZE + RXWI_DESC_SIZE, 613 .desc_size = RXINFO_DESC_SIZE + RXWI_DESC_SIZE,
559 .priv_size = sizeof(struct queue_entry_priv_usb), 614 .priv_size = sizeof(struct queue_entry_priv_usb),
560}; 615};
561 616
562static const struct data_queue_desc rt2800usb_queue_tx = { 617static const struct data_queue_desc rt2800usb_queue_tx = {
563 .entry_num = TX_ENTRIES, 618 .entry_num = 64,
564 .data_size = AGGREGATION_SIZE, 619 .data_size = AGGREGATION_SIZE,
565 .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE, 620 .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
566 .priv_size = sizeof(struct queue_entry_priv_usb), 621 .priv_size = sizeof(struct queue_entry_priv_usb),
567}; 622};
568 623
569static const struct data_queue_desc rt2800usb_queue_bcn = { 624static const struct data_queue_desc rt2800usb_queue_bcn = {
570 .entry_num = 8 * BEACON_ENTRIES, 625 .entry_num = 8,
571 .data_size = MGMT_FRAME_SIZE, 626 .data_size = MGMT_FRAME_SIZE,
572 .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE, 627 .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
573 .priv_size = sizeof(struct queue_entry_priv_usb), 628 .priv_size = sizeof(struct queue_entry_priv_usb),
@@ -599,11 +654,19 @@ static struct usb_device_id rt2800usb_device_table[] = {
599 /* Abocom */ 654 /* Abocom */
600 { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 655 { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
601 { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 656 { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
657 { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
658 { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
659 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
602 { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 660 { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
661 /* AirTies */
662 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
603 /* Allwin */ 663 /* Allwin */
604 { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, 664 { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
605 { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 665 { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
606 { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 666 { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
667 { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
668 { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
669 { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
607 /* Amit */ 670 /* Amit */
608 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, 671 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
609 /* Askey */ 672 /* Askey */
@@ -612,8 +675,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
612 { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) }, 675 { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) },
613 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, 676 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
614 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, 677 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
678 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
615 /* AzureWave */ 679 /* AzureWave */
616 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, 680 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
681 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
682 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
683 { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) },
684 { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) },
617 /* Belkin */ 685 /* Belkin */
618 { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) }, 686 { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) },
619 { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) }, 687 { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -624,6 +692,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
624 { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) }, 692 { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
625 { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) }, 693 { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
626 { USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 694 { USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
695 { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
627 { USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) }, 696 { USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) },
628 { USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) }, 697 { USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) },
629 { USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) }, 698 { USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -632,17 +701,36 @@ static struct usb_device_id rt2800usb_device_table[] = {
632 { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) }, 701 { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) },
633 { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) }, 702 { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
634 { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 703 { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
704 { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
635 /* D-Link */ 705 /* D-Link */
636 { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 706 { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
707 { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
708 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
709 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
710 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
637 { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) }, 711 { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
712 { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
713 /* Draytek */
714 { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) },
638 /* Edimax */ 715 /* Edimax */
716 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
639 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) }, 717 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
640 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) }, 718 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
719 /* Encore */
720 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
721 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
641 /* EnGenius */ 722 /* EnGenius */
642 { USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) }, 723 { USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
643 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) }, 724 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
725 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
726 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
727 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
728 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
729 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
730 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
644 /* Gigabyte */ 731 /* Gigabyte */
645 { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) }, 732 { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
733 { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
646 /* Hawking */ 734 /* Hawking */
647 { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) }, 735 { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
648 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) }, 736 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -651,6 +739,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
651 { USB_DEVICE(0x0e66, 0x0013), USB_DEVICE_DATA(&rt2800usb_ops) }, 739 { USB_DEVICE(0x0e66, 0x0013), USB_DEVICE_DATA(&rt2800usb_ops) },
652 { USB_DEVICE(0x0e66, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) }, 740 { USB_DEVICE(0x0e66, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
653 { USB_DEVICE(0x0e66, 0x0018), USB_DEVICE_DATA(&rt2800usb_ops) }, 741 { USB_DEVICE(0x0e66, 0x0018), USB_DEVICE_DATA(&rt2800usb_ops) },
742 /* I-O DATA */
743 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
744 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
745 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
654 /* Linksys */ 746 /* Linksys */
655 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, 747 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
656 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, 748 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -658,17 +750,44 @@ static struct usb_device_id rt2800usb_device_table[] = {
658 { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) }, 750 { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
659 { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) }, 751 { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
660 { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) }, 752 { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
753 { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) },
661 /* Motorola */ 754 /* Motorola */
662 { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, 755 { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
663 /* MSI */ 756 /* MSI */
757 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
758 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
759 { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
760 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
761 { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
664 { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) }, 762 { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
763 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
764 { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
765 { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) },
766 { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) },
767 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
768 { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
769 { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) },
770 { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) },
771 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
772 /* Para */
773 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
774 /* Pegatron */
775 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
776 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
665 /* Philips */ 777 /* Philips */
666 { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) }, 778 { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
667 /* Planex */ 779 /* Planex */
780 { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
668 { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) }, 781 { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
782 /* Quanta */
783 { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
669 /* Ralink */ 784 /* Ralink */
785 { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
670 { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 786 { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
671 { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 787 { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
788 { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
789 { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
790 { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
672 /* Samsung */ 791 /* Samsung */
673 { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) }, 792 { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
674 /* Siemens */ 793 /* Siemens */
@@ -681,13 +800,22 @@ static struct usb_device_id rt2800usb_device_table[] = {
681 { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) }, 800 { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
682 { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) }, 801 { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
683 { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) }, 802 { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
803 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
684 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 804 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
805 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
806 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
807 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
808 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
685 /* SMC */ 809 /* SMC */
686 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) }, 810 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
811 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
687 { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) }, 812 { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
688 { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) }, 813 { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
689 { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) }, 814 { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
690 { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) }, 815 { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
816 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
817 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
818 { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) },
691 { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) }, 819 { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
692 /* Sparklan */ 820 /* Sparklan */
693 { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) }, 821 { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -701,101 +829,16 @@ static struct usb_device_id rt2800usb_device_table[] = {
701 /* Zinwell */ 829 /* Zinwell */
702 { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) }, 830 { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
703 { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) }, 831 { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
832 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
833 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
704 /* Zyxel */ 834 /* Zyxel */
705 { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) }, 835 { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
706#ifdef CONFIG_RT2800USB_RT30XX 836#ifdef CONFIG_RT2800USB_RT33XX
707 /* Abocom */
708 { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
709 { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
710 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
711 /* AirTies */
712 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
713 /* Allwin */
714 { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
715 { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
716 { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
717 /* ASUS */
718 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
719 /* AzureWave */
720 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
721 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
722 { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) },
723 { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) },
724 /* Conceptronic */
725 { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
726 /* Corega */
727 { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
728 /* D-Link */
729 { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
730 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
731 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
732 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
733 { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
734 /* Draytek */
735 { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) },
736 /* Edimax */
737 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
738 /* Encore */
739 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
740 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
741 /* EnGenius */
742 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
743 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
744 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
745 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
746 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
747 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
748 /* Gigabyte */
749 { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
750 /* I-O DATA */
751 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
752 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
753 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
754 /* Logitec */
755 { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) },
756 /* MSI */
757 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
758 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
759 { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
760 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
761 { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
762 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
763 { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
764 { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) },
765 { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) },
766 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
767 { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
768 { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) },
769 { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) },
770 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
771 /* Para */
772 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
773 /* Pegatron */
774 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
775 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
776 /* Planex */
777 { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
778 /* Quanta */
779 { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
780 /* Ralink */ 837 /* Ralink */
781 { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, 838 { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) },
782 { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, 839 { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
783 { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
784 { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
785 /* Sitecom */ 840 /* Sitecom */
786 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) }, 841 { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) },
787 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
788 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
789 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
790 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
791 /* SMC */
792 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
793 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
794 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
795 { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) },
796 /* Zinwell */
797 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
798 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
799#endif 842#endif
800#ifdef CONFIG_RT2800USB_RT35XX 843#ifdef CONFIG_RT2800USB_RT35XX
801 /* Allwin */ 844 /* Allwin */
@@ -809,12 +852,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
809 /* I-O DATA */ 852 /* I-O DATA */
810 { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) }, 853 { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
811 /* Ralink */ 854 /* Ralink */
812 { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) },
813 { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) }, 855 { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
814 { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
815 /* Sitecom */ 856 /* Sitecom */
816 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, 857 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
817 { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) },
818 /* Zinwell */ 858 /* Zinwell */
819 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) }, 859 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
820#endif 860#endif
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 0722badccf8..671ea359261 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -40,8 +40,8 @@
40/* 40/*
41 * DMA descriptor defines. 41 * DMA descriptor defines.
42 */ 42 */
43#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 43#define TXINFO_DESC_SIZE (1 * sizeof(__le32))
44#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 44#define RXINFO_DESC_SIZE (1 * sizeof(__le32))
45 45
46/* 46/*
47 * TX Info structure 47 * TX Info structure
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 94fe589acfa..0a55eeff871 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -915,7 +915,7 @@ struct rt2x00_dev {
915 * in those cases REGISTER_BUSY_COUNT attempts should be 915 * in those cases REGISTER_BUSY_COUNT attempts should be
916 * taken with a REGISTER_BUSY_DELAY interval. 916 * taken with a REGISTER_BUSY_DELAY interval.
917 */ 917 */
918#define REGISTER_BUSY_COUNT 5 918#define REGISTER_BUSY_COUNT 100
919#define REGISTER_BUSY_DELAY 100 919#define REGISTER_BUSY_DELAY 100
920 920
921/* 921/*
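Note (illustrative sketch, not part of the patch): raising REGISTER_BUSY_COUNT from 5 to 100 while REGISTER_BUSY_DELAY stays at 100 stretches the worst-case busy-wait from roughly 5 * 100 = 500 microseconds to 100 * 100 = 10 ms, assuming the delay is in microseconds as the udelay-based helpers use it. A userspace sketch of the poll loop these constants govern; the names below are stand-ins, not rt2x00 functions:

	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	#define REGISTER_BUSY_COUNT	100
	#define REGISTER_BUSY_DELAY	100	/* microseconds */

	/* stand-in for the real per-bus register read; ready after N polls */
	static bool fake_register_busy(unsigned int *polls_left)
	{
		if (*polls_left) {
			(*polls_left)--;
			return true;
		}
		return false;
	}

	int main(void)
	{
		unsigned int polls_left = 42;
		unsigned int i;

		for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
			if (!fake_register_busy(&polls_left)) {
				printf("ready after %u polls (~%u us)\n",
				       i + 1, (i + 1) * REGISTER_BUSY_DELAY);
				return 0;
			}
			usleep(REGISTER_BUSY_DELAY);
		}
		printf("still busy after ~%u us, giving up\n",
		       REGISTER_BUSY_COUNT * REGISTER_BUSY_DELAY);
		return 1;
	}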
@@ -1133,6 +1133,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
1133int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue, 1133int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1134 const struct ieee80211_tx_queue_params *params); 1134 const struct ieee80211_tx_queue_params *params);
1135void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw); 1135void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
1136void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop);
1136 1137
1137/* 1138/*
1138 * Driver allocation handlers. 1139 * Driver allocation handlers.
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 54ffb5aeb34..a238e908c85 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -133,7 +133,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
133 */ 133 */
134 if (!(ant->flags & ANTENNA_RX_DIVERSITY)) 134 if (!(ant->flags & ANTENNA_RX_DIVERSITY))
135 config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx); 135 config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx);
136 else if(config.rx == ANTENNA_SW_DIVERSITY) 136 else if (config.rx == ANTENNA_SW_DIVERSITY)
137 config.rx = active->rx; 137 config.rx = active->rx;
138 138
139 if (!(ant->flags & ANTENNA_TX_DIVERSITY)) 139 if (!(ant->flags & ANTENNA_TX_DIVERSITY))
@@ -146,7 +146,8 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
146 * else the changes will be ignored by the device. 146 * else the changes will be ignored by the device.
147 */ 147 */
148 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 148 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
149 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK); 149 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
150 STATE_RADIO_RX_OFF);
150 151
151 /* 152 /*
152 * Write new antenna setup to device and reset the link tuner. 153 * Write new antenna setup to device and reset the link tuner.
@@ -160,7 +161,8 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
160 memcpy(active, &config, sizeof(config)); 161 memcpy(active, &config, sizeof(config));
161 162
162 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 163 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
163 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK); 164 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
165 STATE_RADIO_RX_ON);
164} 166}
165 167
166void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, 168void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index fcdb6b0dc40..64dfb1f6823 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -162,11 +162,11 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
162 struct timeval timestamp; 162 struct timeval timestamp;
163 u32 data_len; 163 u32 data_len;
164 164
165 do_gettimeofday(&timestamp); 165 if (likely(!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)))
166
167 if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags))
168 return; 166 return;
169 167
168 do_gettimeofday(&timestamp);
169
170 if (skb_queue_len(&intf->frame_dump_skbqueue) > 20) { 170 if (skb_queue_len(&intf->frame_dump_skbqueue) > 20) {
171 DEBUG(rt2x00dev, "txrx dump queue length exceeded.\n"); 171 DEBUG(rt2x00dev, "txrx dump queue length exceeded.\n");
172 return; 172 return;
@@ -342,7 +342,7 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
342 sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdma done\tdone\n"); 342 sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdma done\tdone\n");
343 343
344 queue_for_each(intf->rt2x00dev, queue) { 344 queue_for_each(intf->rt2x00dev, queue) {
345 spin_lock_irqsave(&queue->lock, irqflags); 345 spin_lock_irqsave(&queue->index_lock, irqflags);
346 346
347 temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid, 347 temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid,
348 queue->count, queue->limit, queue->length, 348 queue->count, queue->limit, queue->length,
@@ -350,7 +350,7 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
350 queue->index[Q_INDEX_DMA_DONE], 350 queue->index[Q_INDEX_DMA_DONE],
351 queue->index[Q_INDEX_DONE]); 351 queue->index[Q_INDEX_DONE]);
352 352
353 spin_unlock_irqrestore(&queue->lock, irqflags); 353 spin_unlock_irqrestore(&queue->index_lock, irqflags);
354 } 354 }
355 355
356 size = strlen(data); 356 size = strlen(data);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 5ba79b935f0..c879f9a7037 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -68,7 +68,8 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
68 /* 68 /*
69 * Enable RX. 69 * Enable RX.
70 */ 70 */
71 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); 71 rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_RX_ON);
72 rt2x00link_start_tuner(rt2x00dev);
72 73
73 /* 74 /*
74 * Start watchdog monitoring. 75 * Start watchdog monitoring.
@@ -102,7 +103,8 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
102 /* 103 /*
103 * Disable RX. 104 * Disable RX.
104 */ 105 */
105 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); 106 rt2x00link_stop_tuner(rt2x00dev);
107 rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_RX_OFF);
106 108
107 /* 109 /*
108 * Disable radio. 110 * Disable radio.
@@ -113,23 +115,6 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
113 rt2x00leds_led_radio(rt2x00dev, false); 115 rt2x00leds_led_radio(rt2x00dev, false);
114} 116}
115 117
116void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state)
117{
118 /*
119 * When we are disabling the RX, we should also stop the link tuner.
120 */
121 if (state == STATE_RADIO_RX_OFF)
122 rt2x00link_stop_tuner(rt2x00dev);
123
124 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
125
126 /*
127 * When we are enabling the RX, we should also start the link tuner.
128 */
129 if (state == STATE_RADIO_RX_ON)
130 rt2x00link_start_tuner(rt2x00dev);
131}
132
133static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac, 118static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
134 struct ieee80211_vif *vif) 119 struct ieee80211_vif *vif)
135{ 120{
@@ -265,10 +250,9 @@ void rt2x00lib_txdone(struct queue_entry *entry,
265 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 250 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
266 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 251 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
267 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb); 252 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
268 unsigned int header_length = ieee80211_get_hdrlen_from_skb(entry->skb); 253 unsigned int header_length, i;
269 u8 rate_idx, rate_flags, retry_rates; 254 u8 rate_idx, rate_flags, retry_rates;
270 u8 skbdesc_flags = skbdesc->flags; 255 u8 skbdesc_flags = skbdesc->flags;
271 unsigned int i;
272 bool success; 256 bool success;
273 257
274 /* 258 /*
@@ -287,6 +271,11 @@ void rt2x00lib_txdone(struct queue_entry *entry,
287 skbdesc->flags &= ~SKBDESC_DESC_IN_SKB; 271 skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
288 272
289 /* 273 /*
274 * Determine the length of 802.11 header.
275 */
276 header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
277
278 /*
290 * Remove L2 padding which was added during 279 * Remove L2 padding which was added during
291 */ 280 */
292 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags)) 281 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
@@ -483,6 +472,10 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
483 unsigned int header_length; 472 unsigned int header_length;
484 int rate_idx; 473 int rate_idx;
485 474
475 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
476 !test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
477 goto submit_entry;
478
486 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) 479 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
487 goto submit_entry; 480 goto submit_entry;
488 481
@@ -567,9 +560,13 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
567 entry->skb = skb; 560 entry->skb = skb;
568 561
569submit_entry: 562submit_entry:
570 rt2x00dev->ops->lib->clear_entry(entry); 563 entry->flags = 0;
571 rt2x00queue_index_inc(entry->queue, Q_INDEX);
572 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); 564 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
565 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
566 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) {
567 rt2x00dev->ops->lib->clear_entry(entry);
568 rt2x00queue_index_inc(entry->queue, Q_INDEX);
569 }
573} 570}
574EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); 571EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
575 572
@@ -678,7 +675,7 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry,
678{ 675{
679 entry->flags = 0; 676 entry->flags = 0;
680 entry->bitrate = rate->bitrate; 677 entry->bitrate = rate->bitrate;
681 entry->hw_value =index; 678 entry->hw_value = index;
682 entry->hw_value_short = index; 679 entry->hw_value_short = index;
683 680
684 if (rate->flags & DEV_RATE_SHORT_PREAMBLE) 681 if (rate->flags & DEV_RATE_SHORT_PREAMBLE)
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 619da23b7b5..2cf68f82674 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -57,7 +57,7 @@ static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value)
57} 57}
58 58
59#define RATE_MCS(__mode, __mcs) \ 59#define RATE_MCS(__mode, __mcs) \
60 ( (((__mode) & 0x00ff) << 8) | ((__mcs) & 0x00ff) ) 60 ((((__mode) & 0x00ff) << 8) | ((__mcs) & 0x00ff))
61 61
62static inline int rt2x00_get_rate_mcs(const u16 mcs_value) 62static inline int rt2x00_get_rate_mcs(const u16 mcs_value)
63{ 63{
@@ -69,7 +69,6 @@ static inline int rt2x00_get_rate_mcs(const u16 mcs_value)
69 */ 69 */
70int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev); 70int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev);
71void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev); 71void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev);
72void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state);
73 72
74/* 73/*
75 * Initialization handlers. 74 * Initialization handlers.
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index b971d8798eb..bfda60eaf4e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -67,7 +67,7 @@
67 (__avg).avg_weight ? \ 67 (__avg).avg_weight ? \
68 ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \ 68 ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \
69 ((__val) * (AVG_FACTOR))) / \ 69 ((__val) * (AVG_FACTOR))) / \
70 (AVG_SAMPLES) ) : \ 70 (AVG_SAMPLES)) : \
71 ((__val) * (AVG_FACTOR)); \ 71 ((__val) * (AVG_FACTOR)); \
72 __new.avg = __new.avg_weight / (AVG_FACTOR); \ 72 __new.avg = __new.avg_weight / (AVG_FACTOR); \
73 __new; \ 73 __new; \
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index c3c206a97d5..829bf4be9bc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -283,14 +283,8 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
283 * invalid behavior in the device. 283 * invalid behavior in the device.
284 */ 284 */
285 memcpy(&intf->mac, vif->addr, ETH_ALEN); 285 memcpy(&intf->mac, vif->addr, ETH_ALEN);
286 if (vif->type == NL80211_IFTYPE_AP) { 286 rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
287 memcpy(&intf->bssid, vif->addr, ETH_ALEN); 287 intf->mac, NULL);
288 rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
289 intf->mac, intf->bssid);
290 } else {
291 rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
292 intf->mac, NULL);
293 }
294 288
295 /* 289 /*
296 * Some filters depend on the current working mode. We can force 290 * Some filters depend on the current working mode. We can force
@@ -358,7 +352,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
358 * if for any reason the link tuner must be reset, this will be 352 * if for any reason the link tuner must be reset, this will be
359 * handled by rt2x00lib_config(). 353 * handled by rt2x00lib_config().
360 */ 354 */
361 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK); 355 rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_RX_OFF);
362 356
363 /* 357 /*
364 * When we've just turned on the radio, we want to reprogram 358 * When we've just turned on the radio, we want to reprogram
@@ -376,7 +370,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
376 rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant); 370 rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant);
377 371
378 /* Turn RX back on */ 372 /* Turn RX back on */
379 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK); 373 rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_RX_ON);
380 374
381 return 0; 375 return 0;
382} 376}
@@ -719,3 +713,41 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
719 wiphy_rfkill_set_hw_state(hw->wiphy, !active); 713 wiphy_rfkill_set_hw_state(hw->wiphy, !active);
720} 714}
721EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll); 715EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
716
717void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop)
718{
719 struct rt2x00_dev *rt2x00dev = hw->priv;
720 struct data_queue *queue;
721 unsigned int i = 0;
722
723 ieee80211_stop_queues(hw);
724
725 /*
726 * Run over all queues to kick them, this will force
727 * any pending frames to be transmitted.
728 */
729 tx_queue_for_each(rt2x00dev, queue) {
730 rt2x00dev->ops->lib->kick_tx_queue(queue);
731 }
732
733 /**
734 * All queues have been kicked, now wait for each queue
735 * to become empty. With a bit of luck, we only have to wait
736 * for the first queue to become empty, because while waiting
737 * for the that queue, the other queues will have transmitted
738 * all their frames as well (since they were already kicked).
739 */
740 tx_queue_for_each(rt2x00dev, queue) {
741 for (i = 0; i < 10; i++) {
742 if (rt2x00queue_empty(queue))
743 break;
744 msleep(100);
745 }
746
747 if (!rt2x00queue_empty(queue))
748 WARNING(rt2x00dev, "Failed to flush queue %d", queue->qid);
749 }
750
751 ieee80211_wake_queues(hw);
752}
753EXPORT_SYMBOL_GPL(rt2x00mac_flush);
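Note (illustrative sketch, not part of the patch): rt2x00mac_flush() works in two phases, kick every TX queue first, then poll each one up to 10 times with a 100 ms sleep, so an unresponsive queue costs at most about one second before the warning fires. A compressed userspace sketch of that shape; the four-queue count and the fake queue state are assumptions, not driver code:

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_TX_QUEUES	4	/* assumed number of AC queues */
	#define FLUSH_POLLS	10	/* the driver sleeps 100 ms per poll */

	struct fake_queue {
		int pending;	/* frames still queued */
	};

	static void kick_queue(struct fake_queue *q)
	{
		q->pending = 0;	/* pretend the hardware drains once kicked */
	}

	static bool queue_empty(const struct fake_queue *q)
	{
		return q->pending == 0;
	}

	int main(void)
	{
		struct fake_queue queues[NUM_TX_QUEUES] = { { 3 }, { 0 }, { 5 }, { 1 } };
		int i, poll;

		/* phase 1: kick all queues so pending frames go out */
		for (i = 0; i < NUM_TX_QUEUES; i++)
			kick_queue(&queues[i]);

		/* phase 2: bounded wait for each queue to drain */
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			for (poll = 0; poll < FLUSH_POLLS; poll++) {
				if (queue_empty(&queues[i]))
					break;
				/* the driver calls msleep(100) here */
			}
			if (!queue_empty(&queues[i]))
				printf("queue %d failed to flush\n", i);
		}
		return 0;
	}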
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 2449d785cf8..868ca19b13e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -105,7 +105,7 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
105 */ 105 */
106 addr = dma_alloc_coherent(rt2x00dev->dev, 106 addr = dma_alloc_coherent(rt2x00dev->dev,
107 queue->limit * queue->desc_size, 107 queue->limit * queue->desc_size,
108 &dma, GFP_KERNEL | GFP_DMA); 108 &dma, GFP_KERNEL);
109 if (!addr) 109 if (!addr)
110 return -ENOMEM; 110 return -ENOMEM;
111 111
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index e360d287def..a3d79c7a21c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -204,8 +204,10 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
204 if (!l2pad) 204 if (!l2pad)
205 return; 205 return;
206 206
207 memmove(skb->data + l2pad, skb->data, header_length); 207 memmove(skb->data + header_length, skb->data + header_length + l2pad,
208 skb_pull(skb, l2pad); 208 skb->len - header_length - l2pad);
209
210 skb_trim(skb, skb->len - l2pad);
209} 211}
210 212
211static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry, 213static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
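Note (illustrative sketch, not part of the patch): the reworked rt2x00queue_remove_l2pad() no longer slides the 802.11 header down over the pad and skb_pull()s from the front; it moves the payload up so it sits directly behind the header and trims the tail, leaving skb->data where it was. A small userspace sketch of the new byte movement, using a made-up toy frame layout:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* "HHHH" = 802.11 header, ".." = L2 pad, "PPPP" = payload */
		char frame[] = "HHHH..PPPP";
		size_t len = strlen(frame);
		size_t header_length = 4;
		size_t l2pad = 2;

		/* move the payload up so it follows the header directly... */
		memmove(frame + header_length, frame + header_length + l2pad,
			len - header_length - l2pad);
		/* ...and drop the now-unused tail (skb_trim() in the driver) */
		frame[len - l2pad] = '\0';

		printf("after remove_l2pad: %s\n", frame);	/* HHHHPPPP */
		return 0;
	}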
@@ -311,14 +313,6 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
311 memset(txdesc, 0, sizeof(*txdesc)); 313 memset(txdesc, 0, sizeof(*txdesc));
312 314
313 /* 315 /*
314 * Initialize information from queue
315 */
316 txdesc->qid = entry->queue->qid;
317 txdesc->cw_min = entry->queue->cw_min;
318 txdesc->cw_max = entry->queue->cw_max;
319 txdesc->aifs = entry->queue->aifs;
320
321 /*
322 * Header and frame information. 316 * Header and frame information.
323 */ 317 */
324 txdesc->length = entry->skb->len; 318 txdesc->length = entry->skb->len;
@@ -460,12 +454,9 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
460 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb); 454 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
461} 455}
462 456
463static void rt2x00queue_kick_tx_queue(struct queue_entry *entry, 457static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
464 struct txentry_desc *txdesc) 458 struct txentry_desc *txdesc)
465{ 459{
466 struct data_queue *queue = entry->queue;
467 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
468
469 /* 460 /*
470 * Check if we need to kick the queue, there are however a few rules 461 * Check if we need to kick the queue, there are however a few rules
471 * 1) Don't kick unless this is the last in frame in a burst. 462 * 1) Don't kick unless this is the last in frame in a burst.
@@ -477,7 +468,7 @@ static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
477 */ 468 */
478 if (rt2x00queue_threshold(queue) || 469 if (rt2x00queue_threshold(queue) ||
479 !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) 470 !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
480 rt2x00dev->ops->lib->kick_tx_queue(queue); 471 queue->rt2x00dev->ops->lib->kick_tx_queue(queue);
481} 472}
482 473
483int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, 474int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
@@ -567,7 +558,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
567 558
568 rt2x00queue_index_inc(queue, Q_INDEX); 559 rt2x00queue_index_inc(queue, Q_INDEX);
569 rt2x00queue_write_tx_descriptor(entry, &txdesc); 560 rt2x00queue_write_tx_descriptor(entry, &txdesc);
570 rt2x00queue_kick_tx_queue(entry, &txdesc); 561 rt2x00queue_kick_tx_queue(queue, &txdesc);
571 562
572 return 0; 563 return 0;
573} 564}
@@ -649,10 +640,10 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
649 * it should not be kicked during this run, since it 640 * it should not be kicked during this run, since it
650 * is part of another TX operation. 641 * is part of another TX operation.
651 */ 642 */
652 spin_lock_irqsave(&queue->lock, irqflags); 643 spin_lock_irqsave(&queue->index_lock, irqflags);
653 index_start = queue->index[start]; 644 index_start = queue->index[start];
654 index_end = queue->index[end]; 645 index_end = queue->index[end];
655 spin_unlock_irqrestore(&queue->lock, irqflags); 646 spin_unlock_irqrestore(&queue->index_lock, irqflags);
656 647
657 /* 648 /*
 658 * Start from the TX done pointer, this guarantees that we will 649 * Start from the TX done pointer, this guarantees that we will
@@ -706,11 +697,11 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
706 return NULL; 697 return NULL;
707 } 698 }
708 699
709 spin_lock_irqsave(&queue->lock, irqflags); 700 spin_lock_irqsave(&queue->index_lock, irqflags);
710 701
711 entry = &queue->entries[queue->index[index]]; 702 entry = &queue->entries[queue->index[index]];
712 703
713 spin_unlock_irqrestore(&queue->lock, irqflags); 704 spin_unlock_irqrestore(&queue->index_lock, irqflags);
714 705
715 return entry; 706 return entry;
716} 707}
@@ -726,7 +717,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
726 return; 717 return;
727 } 718 }
728 719
729 spin_lock_irqsave(&queue->lock, irqflags); 720 spin_lock_irqsave(&queue->index_lock, irqflags);
730 721
731 queue->index[index]++; 722 queue->index[index]++;
732 if (queue->index[index] >= queue->limit) 723 if (queue->index[index] >= queue->limit)
@@ -741,7 +732,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
741 queue->count++; 732 queue->count++;
742 } 733 }
743 734
744 spin_unlock_irqrestore(&queue->lock, irqflags); 735 spin_unlock_irqrestore(&queue->index_lock, irqflags);
745} 736}
746 737
747static void rt2x00queue_reset(struct data_queue *queue) 738static void rt2x00queue_reset(struct data_queue *queue)
@@ -749,7 +740,7 @@ static void rt2x00queue_reset(struct data_queue *queue)
749 unsigned long irqflags; 740 unsigned long irqflags;
750 unsigned int i; 741 unsigned int i;
751 742
752 spin_lock_irqsave(&queue->lock, irqflags); 743 spin_lock_irqsave(&queue->index_lock, irqflags);
753 744
754 queue->count = 0; 745 queue->count = 0;
755 queue->length = 0; 746 queue->length = 0;
@@ -759,7 +750,7 @@ static void rt2x00queue_reset(struct data_queue *queue)
759 queue->last_action[i] = jiffies; 750 queue->last_action[i] = jiffies;
760 } 751 }
761 752
762 spin_unlock_irqrestore(&queue->lock, irqflags); 753 spin_unlock_irqrestore(&queue->index_lock, irqflags);
763} 754}
764 755
765void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev) 756void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
@@ -809,8 +800,8 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
809 return -ENOMEM; 800 return -ENOMEM;
810 801
811#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \ 802#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
812 ( ((char *)(__base)) + ((__limit) * (__esize)) + \ 803 (((char *)(__base)) + ((__limit) * (__esize)) + \
813 ((__index) * (__psize)) ) 804 ((__index) * (__psize)))
814 805
815 for (i = 0; i < queue->limit; i++) { 806 for (i = 0; i < queue->limit; i++) {
816 entries[i].flags = 0; 807 entries[i].flags = 0;
@@ -911,7 +902,7 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
911static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, 902static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
912 struct data_queue *queue, enum data_queue_qid qid) 903 struct data_queue *queue, enum data_queue_qid qid)
913{ 904{
914 spin_lock_init(&queue->lock); 905 spin_lock_init(&queue->index_lock);
915 906
916 queue->rt2x00dev = rt2x00dev; 907 queue->rt2x00dev = rt2x00dev;
917 queue->qid = qid; 908 queue->qid = qid;
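
Note: the rt2x00queue.c hunks above narrow the per-queue spinlock to index handling (lock -> index_lock) and let rt2x00queue_kick_tx_queue() take the data_queue directly. A minimal sketch of the increment that the lock serializes, trimmed from rt2x00queue_index_inc() above (count/length bookkeeping omitted):

static void sketch_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;	/* wrap the ring index */

	/* queue->length / queue->count updates elided here */

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
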
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index d81d85f3486..29b051ac640 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -43,22 +43,6 @@
43#define AGGREGATION_SIZE 3840 43#define AGGREGATION_SIZE 3840
44 44
45/** 45/**
46 * DOC: Number of entries per queue
47 *
48 * Under normal load without fragmentation, 12 entries are sufficient
49 * without the queue being filled up to the maximum. When using fragmentation
50 * and the queue threshold code, we need to add some additional margins to
51 * make sure the queue will never (or only under extreme load) fill up
52 * completely.
53 * Since we don't use preallocated DMA, having a large number of queue entries
54 * will have minimal impact on the memory requirements for the queue.
55 */
56#define RX_ENTRIES 24
57#define TX_ENTRIES 24
58#define BEACON_ENTRIES 1
59#define ATIM_ENTRIES 8
60
61/**
62 * enum data_queue_qid: Queue identification 46 * enum data_queue_qid: Queue identification
63 * 47 *
64 * @QID_AC_BE: AC BE queue 48 * @QID_AC_BE: AC BE queue
@@ -296,7 +280,6 @@ enum txentry_desc_flags {
296 * Summary of information for the frame descriptor before sending a TX frame. 280 * Summary of information for the frame descriptor before sending a TX frame.
297 * 281 *
298 * @flags: Descriptor flags (See &enum queue_entry_flags). 282 * @flags: Descriptor flags (See &enum queue_entry_flags).
299 * @qid: Queue identification (See &enum data_queue_qid).
300 * @length: Length of the entire frame. 283 * @length: Length of the entire frame.
301 * @header_length: Length of 802.11 header. 284 * @header_length: Length of 802.11 header.
302 * @length_high: PLCP length high word. 285 * @length_high: PLCP length high word.
@@ -309,11 +292,8 @@ enum txentry_desc_flags {
309 * @rate_mode: Rate mode (See @enum rate_modulation). 292 * @rate_mode: Rate mode (See @enum rate_modulation).
310 * @mpdu_density: MDPU density. 293 * @mpdu_density: MDPU density.
311 * @retry_limit: Max number of retries. 294 * @retry_limit: Max number of retries.
312 * @aifs: AIFS value.
313 * @ifs: IFS value. 295 * @ifs: IFS value.
314 * @txop: IFS value for 11n capable chips. 296 * @txop: IFS value for 11n capable chips.
315 * @cw_min: cwmin value.
316 * @cw_max: cwmax value.
317 * @cipher: Cipher type used for encryption. 297 * @cipher: Cipher type used for encryption.
318 * @key_idx: Key index used for encryption. 298 * @key_idx: Key index used for encryption.
319 * @iv_offset: Position where IV should be inserted by hardware. 299 * @iv_offset: Position where IV should be inserted by hardware.
@@ -322,8 +302,6 @@ enum txentry_desc_flags {
322struct txentry_desc { 302struct txentry_desc {
323 unsigned long flags; 303 unsigned long flags;
324 304
325 enum data_queue_qid qid;
326
327 u16 length; 305 u16 length;
328 u16 header_length; 306 u16 header_length;
329 307
@@ -339,11 +317,8 @@ struct txentry_desc {
339 u16 mpdu_density; 317 u16 mpdu_density;
340 318
341 short retry_limit; 319 short retry_limit;
342 short aifs;
343 short ifs; 320 short ifs;
344 short txop; 321 short txop;
345 short cw_min;
346 short cw_max;
347 322
348 enum cipher cipher; 323 enum cipher cipher;
349 u16 key_idx; 324 u16 key_idx;
@@ -423,7 +398,7 @@ enum queue_index {
423 * @entries: Base address of the &struct queue_entry which are 398 * @entries: Base address of the &struct queue_entry which are
424 * part of this queue. 399 * part of this queue.
425 * @qid: The queue identification, see &enum data_queue_qid. 400 * @qid: The queue identification, see &enum data_queue_qid.
426 * @lock: Spinlock to protect index handling. Whenever @index, @index_done or 401 * @index_lock: Spinlock to protect index handling. Whenever @index, @index_done or
427 * @index_crypt needs to be changed this lock should be grabbed to prevent 402 * @index_crypt needs to be changed this lock should be grabbed to prevent
428 * index corruption due to concurrency. 403 * index corruption due to concurrency.
429 * @count: Number of frames handled in the queue. 404 * @count: Number of frames handled in the queue.
@@ -447,7 +422,7 @@ struct data_queue {
447 422
448 enum data_queue_qid qid; 423 enum data_queue_qid qid;
449 424
450 spinlock_t lock; 425 spinlock_t index_lock;
451 unsigned int count; 426 unsigned int count;
452 unsigned short limit; 427 unsigned short limit;
453 unsigned short threshold; 428 unsigned short threshold;
@@ -618,10 +593,10 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
618} 593}
619 594
620/** 595/**
 621 * rt2x00queue_timeout - Check if a timeout occured for STATUS reorts 596 * rt2x00queue_status_timeout - Check if a timeout occurred for STATUS reports
622 * @queue: Queue to check. 597 * @queue: Queue to check.
623 */ 598 */
624static inline int rt2x00queue_timeout(struct data_queue *queue) 599static inline int rt2x00queue_status_timeout(struct data_queue *queue)
625{ 600{
626 return time_after(queue->last_action[Q_INDEX_DMA_DONE], 601 return time_after(queue->last_action[Q_INDEX_DMA_DONE],
627 queue->last_action[Q_INDEX_DONE] + (HZ / 10)); 602 queue->last_action[Q_INDEX_DONE] + (HZ / 10));
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index cef94621cef..ed71be95136 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -85,8 +85,6 @@ enum dev_state {
85 STATE_RADIO_OFF, 85 STATE_RADIO_OFF,
86 STATE_RADIO_RX_ON, 86 STATE_RADIO_RX_ON,
87 STATE_RADIO_RX_OFF, 87 STATE_RADIO_RX_OFF,
88 STATE_RADIO_RX_ON_LINK,
89 STATE_RADIO_RX_OFF_LINK,
90 STATE_RADIO_IRQ_ON, 88 STATE_RADIO_IRQ_ON,
91 STATE_RADIO_IRQ_OFF, 89 STATE_RADIO_IRQ_OFF,
92 STATE_RADIO_IRQ_ON_ISR, 90 STATE_RADIO_IRQ_ON_ISR,
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
index fc98063de71..2aa5c38022f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.c
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -40,6 +40,8 @@ static void rt2x00soc_free_reg(struct rt2x00_dev *rt2x00dev)
40 40
41 kfree(rt2x00dev->eeprom); 41 kfree(rt2x00dev->eeprom);
42 rt2x00dev->eeprom = NULL; 42 rt2x00dev->eeprom = NULL;
43
44 iounmap(rt2x00dev->csr.base);
43} 45}
44 46
45static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev) 47static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
@@ -51,9 +53,9 @@ static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
51 if (!res) 53 if (!res)
52 return -ENODEV; 54 return -ENODEV;
53 55
54 rt2x00dev->csr.base = (void __iomem *)KSEG1ADDR(res->start); 56 rt2x00dev->csr.base = ioremap(res->start, resource_size(res));
55 if (!rt2x00dev->csr.base) 57 if (!rt2x00dev->csr.base)
56 goto exit; 58 return -ENOMEM;
57 59
58 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); 60 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
59 if (!rt2x00dev->eeprom) 61 if (!rt2x00dev->eeprom)
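
Note: rt2x00soc now maps the CSR window with ioremap() instead of the MIPS-only KSEG1ADDR() cast, and frees it with iounmap(). A condensed sketch of that pairing, assuming rt2x00dev->dev points at the platform device (eeprom handling and error unwinding as in the hunks above):

static int sketch_soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	struct platform_device *pdev = to_platform_device(rt2x00dev->dev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENODEV;

	rt2x00dev->csr.base = ioremap(res->start, resource_size(res));
	if (!rt2x00dev->csr.base)
		return -ENOMEM;

	return 0;
}

static void sketch_soc_free_reg(struct rt2x00_dev *rt2x00dev)
{
	/* every successful ioremap() needs a matching iounmap() */
	iounmap(rt2x00dev->csr.base);
	rt2x00dev->csr.base = NULL;
}
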
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index b3317df7a7d..9ac14598e2a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -226,9 +226,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
226 * Schedule the delayed work for reading the TX status 226 * Schedule the delayed work for reading the TX status
227 * from the device. 227 * from the device.
228 */ 228 */
229 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && 229 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
230 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
231 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
232} 230}
233 231
234static void rt2x00usb_kick_tx_entry(struct queue_entry *entry) 232static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
@@ -323,21 +321,6 @@ static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
323 rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work); 321 rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);
324 322
325 /* 323 /*
326 * Security measure: if the driver did override the
327 * txdone_work function, and the hardware did arrive
328 * in a state which causes it to malfunction, it is
329 * possible that the driver couldn't handle the txdone
330 * event correctly. So after giving the driver the
331 * chance to cleanup, we now force a cleanup of any
332 * leftovers.
333 */
334 if (!rt2x00queue_empty(queue)) {
335 WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
336 " status handling failed, invoke hard reset", queue->qid);
337 rt2x00usb_work_txdone(&rt2x00dev->txdone_work);
338 }
339
340 /*
341 * The queue has been reset, and mac80211 is allowed to use the 324 * The queue has been reset, and mac80211 is allowed to use the
342 * queue again. 325 * queue again.
343 */ 326 */
@@ -361,7 +344,7 @@ void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
361 if (!rt2x00queue_empty(queue)) { 344 if (!rt2x00queue_empty(queue)) {
362 if (rt2x00queue_dma_timeout(queue)) 345 if (rt2x00queue_dma_timeout(queue))
363 rt2x00usb_watchdog_tx_dma(queue); 346 rt2x00usb_watchdog_tx_dma(queue);
364 if (rt2x00queue_timeout(queue)) 347 if (rt2x00queue_status_timeout(queue))
365 rt2x00usb_watchdog_tx_status(queue); 348 rt2x00usb_watchdog_tx_status(queue);
366 } 349 }
367 } 350 }
@@ -424,9 +407,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
424 * Schedule the delayed work for reading the RX status 407 * Schedule the delayed work for reading the RX status
425 * from the device. 408 * from the device.
426 */ 409 */
427 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && 410 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
428 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
429 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
430} 411}
431 412
432/* 413/*
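
Note: the USB watchdog above now distinguishes a stalled DMA transfer from a missing TX status report via the renamed rt2x00queue_status_timeout() helper. A compact sketch of the per-queue dispatch, with the handler bodies left to the driver (the tx_queue_for_each() iterator name is taken from the rt2x00 headers):

static void sketch_usb_watchdog(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue) {
		if (rt2x00queue_empty(queue))
			continue;

		if (rt2x00queue_dma_timeout(queue))	/* URB never completed */
			rt2x00usb_watchdog_tx_dma(queue);
		if (rt2x00queue_status_timeout(queue))	/* status report overdue */
			rt2x00usb_watchdog_tx_status(queue);
	}
}
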
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index af548c87f10..6b09b01f634 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1623,8 +1623,7 @@ static void rt61pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
1623 1623
1624 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg); 1624 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
1625 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1625 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX,
1626 (state == STATE_RADIO_RX_OFF) || 1626 (state == STATE_RADIO_RX_OFF));
1627 (state == STATE_RADIO_RX_OFF_LINK));
1628 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); 1627 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
1629} 1628}
1630 1629
@@ -1745,9 +1744,7 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1745 rt61pci_disable_radio(rt2x00dev); 1744 rt61pci_disable_radio(rt2x00dev);
1746 break; 1745 break;
1747 case STATE_RADIO_RX_ON: 1746 case STATE_RADIO_RX_ON:
1748 case STATE_RADIO_RX_ON_LINK:
1749 case STATE_RADIO_RX_OFF: 1747 case STATE_RADIO_RX_OFF:
1750 case STATE_RADIO_RX_OFF_LINK:
1751 rt61pci_toggle_rx(rt2x00dev, state); 1748 rt61pci_toggle_rx(rt2x00dev, state);
1752 break; 1749 break;
1753 case STATE_RADIO_IRQ_ON: 1750 case STATE_RADIO_IRQ_ON:
@@ -1789,10 +1786,10 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
1789 * Start writing the descriptor words. 1786 * Start writing the descriptor words.
1790 */ 1787 */
1791 rt2x00_desc_read(txd, 1, &word); 1788 rt2x00_desc_read(txd, 1, &word);
1792 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid); 1789 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
1793 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1790 rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
1794 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1791 rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
1795 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1792 rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
1796 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); 1793 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1797 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1794 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1798 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); 1795 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
@@ -1820,7 +1817,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
1820 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1817 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1821 rt2x00_desc_write(txd, 5, word); 1818 rt2x00_desc_write(txd, 5, word);
1822 1819
1823 if (txdesc->qid != QID_BEACON) { 1820 if (entry->queue->qid != QID_BEACON) {
1824 rt2x00_desc_read(txd, 6, &word); 1821 rt2x00_desc_read(txd, 6, &word);
1825 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS, 1822 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
1826 skbdesc->skb_dma); 1823 skbdesc->skb_dma);
@@ -1866,8 +1863,8 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
1866 * Register descriptor details in skb frame descriptor. 1863 * Register descriptor details in skb frame descriptor.
1867 */ 1864 */
1868 skbdesc->desc = txd; 1865 skbdesc->desc = txd;
1869 skbdesc->desc_len = 1866 skbdesc->desc_len = (entry->queue->qid == QID_BEACON) ? TXINFO_SIZE :
1870 (txdesc->qid == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE; 1867 TXD_DESC_SIZE;
1871} 1868}
1872 1869
1873/* 1870/*
@@ -2078,7 +2075,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2078 * that the TX_STA_FIFO stack has a size of 16. We stick to our 2075 * that the TX_STA_FIFO stack has a size of 16. We stick to our
2079 * tx ring size for now. 2076 * tx ring size for now.
2080 */ 2077 */
2081 for (i = 0; i < TX_ENTRIES; i++) { 2078 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
2082 rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg); 2079 rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg);
2083 if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) 2080 if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
2084 break; 2081 break;
@@ -2824,6 +2821,7 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2824 .conf_tx = rt61pci_conf_tx, 2821 .conf_tx = rt61pci_conf_tx,
2825 .get_tsf = rt61pci_get_tsf, 2822 .get_tsf = rt61pci_get_tsf,
2826 .rfkill_poll = rt2x00mac_rfkill_poll, 2823 .rfkill_poll = rt2x00mac_rfkill_poll,
2824 .flush = rt2x00mac_flush,
2827}; 2825};
2828 2826
2829static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { 2827static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
@@ -2857,21 +2855,21 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2857}; 2855};
2858 2856
2859static const struct data_queue_desc rt61pci_queue_rx = { 2857static const struct data_queue_desc rt61pci_queue_rx = {
2860 .entry_num = RX_ENTRIES, 2858 .entry_num = 32,
2861 .data_size = DATA_FRAME_SIZE, 2859 .data_size = DATA_FRAME_SIZE,
2862 .desc_size = RXD_DESC_SIZE, 2860 .desc_size = RXD_DESC_SIZE,
2863 .priv_size = sizeof(struct queue_entry_priv_pci), 2861 .priv_size = sizeof(struct queue_entry_priv_pci),
2864}; 2862};
2865 2863
2866static const struct data_queue_desc rt61pci_queue_tx = { 2864static const struct data_queue_desc rt61pci_queue_tx = {
2867 .entry_num = TX_ENTRIES, 2865 .entry_num = 32,
2868 .data_size = DATA_FRAME_SIZE, 2866 .data_size = DATA_FRAME_SIZE,
2869 .desc_size = TXD_DESC_SIZE, 2867 .desc_size = TXD_DESC_SIZE,
2870 .priv_size = sizeof(struct queue_entry_priv_pci), 2868 .priv_size = sizeof(struct queue_entry_priv_pci),
2871}; 2869};
2872 2870
2873static const struct data_queue_desc rt61pci_queue_bcn = { 2871static const struct data_queue_desc rt61pci_queue_bcn = {
2874 .entry_num = 4 * BEACON_ENTRIES, 2872 .entry_num = 4,
2875 .data_size = 0, /* No DMA required for beacons */ 2873 .data_size = 0, /* No DMA required for beacons */
2876 .desc_size = TXINFO_SIZE, 2874 .desc_size = TXINFO_SIZE,
2877 .priv_size = sizeof(struct queue_entry_priv_pci), 2875 .priv_size = sizeof(struct queue_entry_priv_pci),
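
Note: with the RX_ENTRIES/TX_ENTRIES/BEACON_ENTRIES macros gone from rt2x00queue.h, queue sizing is now purely per driver through struct data_queue_desc, and loops that used the macros read ops->tx->entry_num instead. Sketch, reusing the rt61pci values shown above:

static const struct data_queue_desc sketch_queue_tx = {
	.entry_num = 32,			/* was TX_ENTRIES */
	.data_size = DATA_FRAME_SIZE,
	.desc_size = TXD_DESC_SIZE,
	.priv_size = sizeof(struct queue_entry_priv_pci),
};

static void sketch_walk_tx_fifo(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;

	/* was: for (i = 0; i < TX_ENTRIES; i++) */
	for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++)
		; /* read one TX_STA_FIFO entry per iteration in the driver */
}
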
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index e2e728ab0b2..afc803b7959 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -412,7 +412,7 @@ struct hw_pairwise_ta_entry {
412 * DROP_VERSION_ERROR: Drop version error frame. 412 * DROP_VERSION_ERROR: Drop version error frame.
413 * DROP_MULTICAST: Drop multicast frames. 413 * DROP_MULTICAST: Drop multicast frames.
414 * DROP_BORADCAST: Drop broadcast frames. 414 * DROP_BORADCAST: Drop broadcast frames.
415 * ROP_ACK_CTS: Drop received ACK and CTS. 415 * DROP_ACK_CTS: Drop received ACK and CTS.
416 */ 416 */
417#define TXRX_CSR0 0x3040 417#define TXRX_CSR0 0x3040
418#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff) 418#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff)
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 9be8089317e..6f04552f581 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -40,7 +40,7 @@
40/* 40/*
41 * Allow hardware encryption to be disabled. 41 * Allow hardware encryption to be disabled.
42 */ 42 */
43static int modparam_nohwcrypt = 0; 43static int modparam_nohwcrypt;
44module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 44module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
45MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 45MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
46 46
@@ -1331,8 +1331,7 @@ static void rt73usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
1331 1331
1332 rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg); 1332 rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
1333 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1333 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX,
1334 (state == STATE_RADIO_RX_OFF) || 1334 (state == STATE_RADIO_RX_OFF));
1335 (state == STATE_RADIO_RX_OFF_LINK));
1336 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); 1335 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
1337} 1336}
1338 1337
@@ -1403,9 +1402,7 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1403 rt73usb_disable_radio(rt2x00dev); 1402 rt73usb_disable_radio(rt2x00dev);
1404 break; 1403 break;
1405 case STATE_RADIO_RX_ON: 1404 case STATE_RADIO_RX_ON:
1406 case STATE_RADIO_RX_ON_LINK:
1407 case STATE_RADIO_RX_OFF: 1405 case STATE_RADIO_RX_OFF:
1408 case STATE_RADIO_RX_OFF_LINK:
1409 rt73usb_toggle_rx(rt2x00dev, state); 1406 rt73usb_toggle_rx(rt2x00dev, state);
1410 break; 1407 break;
1411 case STATE_RADIO_IRQ_ON: 1408 case STATE_RADIO_IRQ_ON:
@@ -1472,10 +1469,10 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
1472 rt2x00_desc_write(txd, 0, word); 1469 rt2x00_desc_write(txd, 0, word);
1473 1470
1474 rt2x00_desc_read(txd, 1, &word); 1471 rt2x00_desc_read(txd, 1, &word);
1475 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid); 1472 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
1476 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1473 rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
1477 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1474 rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
1478 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1475 rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
1479 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); 1476 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1480 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1477 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1481 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); 1478 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
@@ -2264,6 +2261,7 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
2264 .conf_tx = rt73usb_conf_tx, 2261 .conf_tx = rt73usb_conf_tx,
2265 .get_tsf = rt73usb_get_tsf, 2262 .get_tsf = rt73usb_get_tsf,
2266 .rfkill_poll = rt2x00mac_rfkill_poll, 2263 .rfkill_poll = rt2x00mac_rfkill_poll,
2264 .flush = rt2x00mac_flush,
2267}; 2265};
2268 2266
2269static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { 2267static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
@@ -2296,21 +2294,21 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2296}; 2294};
2297 2295
2298static const struct data_queue_desc rt73usb_queue_rx = { 2296static const struct data_queue_desc rt73usb_queue_rx = {
2299 .entry_num = RX_ENTRIES, 2297 .entry_num = 32,
2300 .data_size = DATA_FRAME_SIZE, 2298 .data_size = DATA_FRAME_SIZE,
2301 .desc_size = RXD_DESC_SIZE, 2299 .desc_size = RXD_DESC_SIZE,
2302 .priv_size = sizeof(struct queue_entry_priv_usb), 2300 .priv_size = sizeof(struct queue_entry_priv_usb),
2303}; 2301};
2304 2302
2305static const struct data_queue_desc rt73usb_queue_tx = { 2303static const struct data_queue_desc rt73usb_queue_tx = {
2306 .entry_num = TX_ENTRIES, 2304 .entry_num = 32,
2307 .data_size = DATA_FRAME_SIZE, 2305 .data_size = DATA_FRAME_SIZE,
2308 .desc_size = TXD_DESC_SIZE, 2306 .desc_size = TXD_DESC_SIZE,
2309 .priv_size = sizeof(struct queue_entry_priv_usb), 2307 .priv_size = sizeof(struct queue_entry_priv_usb),
2310}; 2308};
2311 2309
2312static const struct data_queue_desc rt73usb_queue_bcn = { 2310static const struct data_queue_desc rt73usb_queue_bcn = {
2313 .entry_num = 4 * BEACON_ENTRIES, 2311 .entry_num = 4,
2314 .data_size = MGMT_FRAME_SIZE, 2312 .data_size = MGMT_FRAME_SIZE,
2315 .desc_size = TXINFO_SIZE, 2313 .desc_size = TXINFO_SIZE,
2316 .priv_size = sizeof(struct queue_entry_priv_usb), 2314 .priv_size = sizeof(struct queue_entry_priv_usb),
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 44d5b2bebd3..1315ce5c992 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -322,7 +322,7 @@ struct hw_pairwise_ta_entry {
322 * DROP_VERSION_ERROR: Drop version error frame. 322 * DROP_VERSION_ERROR: Drop version error frame.
323 * DROP_MULTICAST: Drop multicast frames. 323 * DROP_MULTICAST: Drop multicast frames.
324 * DROP_BORADCAST: Drop broadcast frames. 324 * DROP_BORADCAST: Drop broadcast frames.
325 * ROP_ACK_CTS: Drop received ACK and CTS. 325 * DROP_ACK_CTS: Drop received ACK and CTS.
326 */ 326 */
327#define TXRX_CSR0 0x3040 327#define TXRX_CSR0 0x3040
328#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff) 328#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff)
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 38fa8244cc9..eeee244fcaa 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -553,6 +553,46 @@ static int rtl8187b_init_status_urb(struct ieee80211_hw *dev)
553 return ret; 553 return ret;
554} 554}
555 555
556static void rtl8187_set_anaparam(struct rtl8187_priv *priv, bool rfon)
557{
558 u32 anaparam, anaparam2;
559 u8 anaparam3, reg;
560
561 if (!priv->is_rtl8187b) {
562 if (rfon) {
563 anaparam = RTL8187_RTL8225_ANAPARAM_ON;
564 anaparam2 = RTL8187_RTL8225_ANAPARAM2_ON;
565 } else {
566 anaparam = RTL8187_RTL8225_ANAPARAM_OFF;
567 anaparam2 = RTL8187_RTL8225_ANAPARAM2_OFF;
568 }
569 } else {
570 if (rfon) {
571 anaparam = RTL8187B_RTL8225_ANAPARAM_ON;
572 anaparam2 = RTL8187B_RTL8225_ANAPARAM2_ON;
573 anaparam3 = RTL8187B_RTL8225_ANAPARAM3_ON;
574 } else {
575 anaparam = RTL8187B_RTL8225_ANAPARAM_OFF;
576 anaparam2 = RTL8187B_RTL8225_ANAPARAM2_OFF;
577 anaparam3 = RTL8187B_RTL8225_ANAPARAM3_OFF;
578 }
579 }
580
581 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
582 RTL818X_EEPROM_CMD_CONFIG);
583 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
584 reg |= RTL818X_CONFIG3_ANAPARAM_WRITE;
585 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
586 rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam);
587 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
588 if (priv->is_rtl8187b)
589 rtl818x_iowrite8(priv, &priv->map->ANAPARAM3, anaparam3);
590 reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
591 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
592 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
593 RTL818X_EEPROM_CMD_NORMAL);
594}
595
556static int rtl8187_cmd_reset(struct ieee80211_hw *dev) 596static int rtl8187_cmd_reset(struct ieee80211_hw *dev)
557{ 597{
558 struct rtl8187_priv *priv = dev->priv; 598 struct rtl8187_priv *priv = dev->priv;
@@ -603,19 +643,7 @@ static int rtl8187_init_hw(struct ieee80211_hw *dev)
603 int res; 643 int res;
604 644
605 /* reset */ 645 /* reset */
606 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, 646 rtl8187_set_anaparam(priv, true);
607 RTL818X_EEPROM_CMD_CONFIG);
608 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
609 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg |
610 RTL818X_CONFIG3_ANAPARAM_WRITE);
611 rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
612 RTL8187_RTL8225_ANAPARAM_ON);
613 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
614 RTL8187_RTL8225_ANAPARAM2_ON);
615 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg &
616 ~RTL818X_CONFIG3_ANAPARAM_WRITE);
617 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
618 RTL818X_EEPROM_CMD_NORMAL);
619 647
620 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 648 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
621 649
@@ -629,17 +657,7 @@ static int rtl8187_init_hw(struct ieee80211_hw *dev)
629 if (res) 657 if (res)
630 return res; 658 return res;
631 659
632 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 660 rtl8187_set_anaparam(priv, true);
633 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
634 rtl818x_iowrite8(priv, &priv->map->CONFIG3,
635 reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
636 rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
637 RTL8187_RTL8225_ANAPARAM_ON);
638 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
639 RTL8187_RTL8225_ANAPARAM2_ON);
640 rtl818x_iowrite8(priv, &priv->map->CONFIG3,
641 reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
642 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
643 661
644 /* setup card */ 662 /* setup card */
645 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0); 663 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0);
@@ -712,10 +730,9 @@ static const u8 rtl8187b_reg_table[][3] = {
712 730
713 {0x58, 0x4B, 1}, {0x59, 0x00, 1}, {0x5A, 0x4B, 1}, {0x5B, 0x00, 1}, 731 {0x58, 0x4B, 1}, {0x59, 0x00, 1}, {0x5A, 0x4B, 1}, {0x5B, 0x00, 1},
714 {0x60, 0x4B, 1}, {0x61, 0x09, 1}, {0x62, 0x4B, 1}, {0x63, 0x09, 1}, 732 {0x60, 0x4B, 1}, {0x61, 0x09, 1}, {0x62, 0x4B, 1}, {0x63, 0x09, 1},
715 {0xCE, 0x0F, 1}, {0xCF, 0x00, 1}, {0xE0, 0xFF, 1}, {0xE1, 0x0F, 1}, 733 {0xCE, 0x0F, 1}, {0xCF, 0x00, 1}, {0xF0, 0x4E, 1}, {0xF1, 0x01, 1},
716 {0xE2, 0x00, 1}, {0xF0, 0x4E, 1}, {0xF1, 0x01, 1}, {0xF2, 0x02, 1}, 734 {0xF2, 0x02, 1}, {0xF3, 0x03, 1}, {0xF4, 0x04, 1}, {0xF5, 0x05, 1},
717 {0xF3, 0x03, 1}, {0xF4, 0x04, 1}, {0xF5, 0x05, 1}, {0xF6, 0x06, 1}, 735 {0xF6, 0x06, 1}, {0xF7, 0x07, 1}, {0xF8, 0x08, 1},
718 {0xF7, 0x07, 1}, {0xF8, 0x08, 1},
719 736
720 {0x4E, 0x00, 2}, {0x0C, 0x04, 2}, {0x21, 0x61, 2}, {0x22, 0x68, 2}, 737 {0x4E, 0x00, 2}, {0x0C, 0x04, 2}, {0x21, 0x61, 2}, {0x22, 0x68, 2},
721 {0x23, 0x6F, 2}, {0x24, 0x76, 2}, {0x25, 0x7D, 2}, {0x26, 0x84, 2}, 738 {0x23, 0x6F, 2}, {0x24, 0x76, 2}, {0x25, 0x7D, 2}, {0x26, 0x84, 2},
@@ -723,14 +740,13 @@ static const u8 rtl8187b_reg_table[][3] = {
723 {0x52, 0x04, 2}, {0x53, 0xA0, 2}, {0x54, 0x1F, 2}, {0x55, 0x23, 2}, 740 {0x52, 0x04, 2}, {0x53, 0xA0, 2}, {0x54, 0x1F, 2}, {0x55, 0x23, 2},
724 {0x56, 0x45, 2}, {0x57, 0x67, 2}, {0x58, 0x08, 2}, {0x59, 0x08, 2}, 741 {0x56, 0x45, 2}, {0x57, 0x67, 2}, {0x58, 0x08, 2}, {0x59, 0x08, 2},
725 {0x5A, 0x08, 2}, {0x5B, 0x08, 2}, {0x60, 0x08, 2}, {0x61, 0x08, 2}, 742 {0x5A, 0x08, 2}, {0x5B, 0x08, 2}, {0x60, 0x08, 2}, {0x61, 0x08, 2},
726 {0x62, 0x08, 2}, {0x63, 0x08, 2}, {0x64, 0xCF, 2}, {0x72, 0x56, 2}, 743 {0x62, 0x08, 2}, {0x63, 0x08, 2}, {0x64, 0xCF, 2},
727 {0x73, 0x9A, 2},
728 744
729 {0x34, 0xF0, 0}, {0x35, 0x0F, 0}, {0x5B, 0x40, 0}, {0x84, 0x88, 0}, 745 {0x5B, 0x40, 0}, {0x84, 0x88, 0}, {0x85, 0x24, 0}, {0x88, 0x54, 0},
730 {0x85, 0x24, 0}, {0x88, 0x54, 0}, {0x8B, 0xB8, 0}, {0x8C, 0x07, 0}, 746 {0x8B, 0xB8, 0}, {0x8C, 0x07, 0}, {0x8D, 0x00, 0}, {0x94, 0x1B, 0},
731 {0x8D, 0x00, 0}, {0x94, 0x1B, 0}, {0x95, 0x12, 0}, {0x96, 0x00, 0}, 747 {0x95, 0x12, 0}, {0x96, 0x00, 0}, {0x97, 0x06, 0}, {0x9D, 0x1A, 0},
732 {0x97, 0x06, 0}, {0x9D, 0x1A, 0}, {0x9F, 0x10, 0}, {0xB4, 0x22, 0}, 748 {0x9F, 0x10, 0}, {0xB4, 0x22, 0}, {0xBE, 0x80, 0}, {0xDB, 0x00, 0},
733 {0xBE, 0x80, 0}, {0xDB, 0x00, 0}, {0xEE, 0x00, 0}, {0x4C, 0x00, 2}, 749 {0xEE, 0x00, 0}, {0x4C, 0x00, 2},
734 750
735 {0x9F, 0x00, 3}, {0x8C, 0x01, 0}, {0x8D, 0x10, 0}, {0x8E, 0x08, 0}, 751 {0x9F, 0x00, 3}, {0x8C, 0x01, 0}, {0x8D, 0x10, 0}, {0x8E, 0x08, 0},
736 {0x8F, 0x00, 0} 752 {0x8F, 0x00, 0}
@@ -742,48 +758,34 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
742 int res, i; 758 int res, i;
743 u8 reg; 759 u8 reg;
744 760
745 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, 761 rtl8187_set_anaparam(priv, true);
746 RTL818X_EEPROM_CMD_CONFIG);
747
748 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
749 reg |= RTL818X_CONFIG3_ANAPARAM_WRITE | RTL818X_CONFIG3_GNT_SELECT;
750 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
751 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
752 RTL8187B_RTL8225_ANAPARAM2_ON);
753 rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
754 RTL8187B_RTL8225_ANAPARAM_ON);
755 rtl818x_iowrite8(priv, &priv->map->ANAPARAM3,
756 RTL8187B_RTL8225_ANAPARAM3_ON);
757 762
763 /* Reset PLL sequence on 8187B. Realtek note: reduces power
 764 * consumption by about 30 mA */
758 rtl818x_iowrite8(priv, (u8 *)0xFF61, 0x10); 765 rtl818x_iowrite8(priv, (u8 *)0xFF61, 0x10);
759 reg = rtl818x_ioread8(priv, (u8 *)0xFF62); 766 reg = rtl818x_ioread8(priv, (u8 *)0xFF62);
760 rtl818x_iowrite8(priv, (u8 *)0xFF62, reg & ~(1 << 5)); 767 rtl818x_iowrite8(priv, (u8 *)0xFF62, reg & ~(1 << 5));
761 rtl818x_iowrite8(priv, (u8 *)0xFF62, reg | (1 << 5)); 768 rtl818x_iowrite8(priv, (u8 *)0xFF62, reg | (1 << 5));
762 769
763 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
764 reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
765 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
766
767 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
768 RTL818X_EEPROM_CMD_NORMAL);
769
770 res = rtl8187_cmd_reset(dev); 770 res = rtl8187_cmd_reset(dev);
771 if (res) 771 if (res)
772 return res; 772 return res;
773 773
774 rtl818x_iowrite16(priv, (__le16 *)0xFF2D, 0x0FFF); 774 rtl8187_set_anaparam(priv, true);
775
776 /* BRSR (Basic Rate Set Register) on 8187B looks to be the same as
 777 * RESP_RATE on 8187L in Realtek sources: each bit corresponds to
 778 * one of the 12 rates, and all are enabled */
779 rtl818x_iowrite16(priv, (__le16 *)0xFF34, 0x0FFF);
780
775 reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); 781 reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
776 reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT; 782 reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
777 rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg); 783 rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
778 reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
779 reg |= RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT |
780 RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
781 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
782 784
785 /* Auto Rate Fallback Register (ARFR): 1M-54M setting */
783 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFE0, 0x0FFF, 1); 786 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFE0, 0x0FFF, 1);
787 rtl818x_iowrite8_idx(priv, (u8 *)0xFFE2, 0x00, 1);
784 788
785 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
786 rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2);
787 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFD4, 0xFFFF, 1); 789 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFD4, 0xFFFF, 1);
788 790
789 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, 791 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
@@ -811,16 +813,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
811 813
812 rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00004001); 814 rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00004001);
813 815
816 /* RFSW_CTRL register */
814 rtl818x_iowrite16_idx(priv, (__le16 *)0xFF72, 0x569A, 2); 817 rtl818x_iowrite16_idx(priv, (__le16 *)0xFF72, 0x569A, 2);
815 818
816 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
817 RTL818X_EEPROM_CMD_CONFIG);
818 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
819 reg |= RTL818X_CONFIG3_ANAPARAM_WRITE;
820 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
821 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
822 RTL818X_EEPROM_CMD_NORMAL);
823
824 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); 819 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
825 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488); 820 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488);
826 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); 821 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
@@ -929,6 +924,12 @@ static int rtl8187_start(struct ieee80211_hw *dev)
929 priv->rx_conf = reg; 924 priv->rx_conf = reg;
930 rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); 925 rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
931 926
927 reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
928 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
929 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
930 reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
931 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
932
932 rtl818x_iowrite32(priv, &priv->map->TX_CONF, 933 rtl818x_iowrite32(priv, &priv->map->TX_CONF,
933 RTL818X_TX_CONF_HW_SEQNUM | 934 RTL818X_TX_CONF_HW_SEQNUM |
934 RTL818X_TX_CONF_DISREQQSIZE | 935 RTL818X_TX_CONF_DISREQQSIZE |
@@ -1002,6 +1003,7 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
1002 rtl818x_iowrite8(priv, &priv->map->CMD, reg); 1003 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
1003 1004
1004 priv->rf->stop(dev); 1005 priv->rf->stop(dev);
1006 rtl8187_set_anaparam(priv, false);
1005 1007
1006 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 1008 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
1007 reg = rtl818x_ioread8(priv, &priv->map->CONFIG4); 1009 reg = rtl818x_ioread8(priv, &priv->map->CONFIG4);
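
Note: the three open-coded ANAPARAM register sequences in rtl8187_dev.c are folded into rtl8187_set_anaparam(), which the init and stop paths now call with opposite polarity. In outline (bodies trimmed, only the calls shown in the hunks above):

static int sketch_rtl8187_init_hw(struct ieee80211_hw *dev)
{
	struct rtl8187_priv *priv = dev->priv;

	rtl8187_set_anaparam(priv, true);	/* RF analog parameters on */
	/* ... command reset and card setup as in the driver ... */
	return 0;
}

static void sketch_rtl8187_stop(struct ieee80211_hw *dev)
{
	struct rtl8187_priv *priv = dev->priv;

	priv->rf->stop(dev);
	rtl8187_set_anaparam(priv, false);	/* and off again on stop */
}
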
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
index 97eebdcf7eb..5c6666f09ac 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
@@ -898,29 +898,7 @@ static void rtl8225z2_b_rf_init(struct ieee80211_hw *dev)
898 898
899static void rtl8225_rf_stop(struct ieee80211_hw *dev) 899static void rtl8225_rf_stop(struct ieee80211_hw *dev)
900{ 900{
901 u8 reg;
902 struct rtl8187_priv *priv = dev->priv;
903
904 rtl8225_write(dev, 0x4, 0x1f); 901 rtl8225_write(dev, 0x4, 0x1f);
905
906 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
907 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
908 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
909 if (!priv->is_rtl8187b) {
910 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
911 RTL8187_RTL8225_ANAPARAM2_OFF);
912 rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
913 RTL8187_RTL8225_ANAPARAM_OFF);
914 } else {
915 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
916 RTL8187B_RTL8225_ANAPARAM2_OFF);
917 rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
918 RTL8187B_RTL8225_ANAPARAM_OFF);
919 rtl818x_iowrite8(priv, &priv->map->ANAPARAM3,
920 RTL8187B_RTL8225_ANAPARAM3_OFF);
921 }
922 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
923 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
924} 902}
925 903
926static void rtl8225_rf_set_channel(struct ieee80211_hw *dev, 904static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 7a8762553cd..012e1a4016f 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -52,14 +52,14 @@ void wl1251_disable_interrupts(struct wl1251 *wl)
52 wl->if_ops->disable_irq(wl); 52 wl->if_ops->disable_irq(wl);
53} 53}
54 54
55static void wl1251_power_off(struct wl1251 *wl) 55static int wl1251_power_off(struct wl1251 *wl)
56{ 56{
57 wl->set_power(false); 57 return wl->if_ops->power(wl, false);
58} 58}
59 59
60static void wl1251_power_on(struct wl1251 *wl) 60static int wl1251_power_on(struct wl1251 *wl)
61{ 61{
62 wl->set_power(true); 62 return wl->if_ops->power(wl, true);
63} 63}
64 64
65static int wl1251_fetch_firmware(struct wl1251 *wl) 65static int wl1251_fetch_firmware(struct wl1251 *wl)
@@ -152,9 +152,12 @@ static void wl1251_fw_wakeup(struct wl1251 *wl)
152 152
153static int wl1251_chip_wakeup(struct wl1251 *wl) 153static int wl1251_chip_wakeup(struct wl1251 *wl)
154{ 154{
155 int ret = 0; 155 int ret;
156
157 ret = wl1251_power_on(wl);
158 if (ret < 0)
159 return ret;
156 160
157 wl1251_power_on(wl);
158 msleep(WL1251_POWER_ON_SLEEP); 161 msleep(WL1251_POWER_ON_SLEEP);
159 wl->if_ops->reset(wl); 162 wl->if_ops->reset(wl);
160 163
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
index 74ba9ced539..596d90ecba3 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/wl1251/sdio.c
@@ -26,6 +26,7 @@
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/wl12xx.h> 27#include <linux/wl12xx.h>
28#include <linux/irq.h> 28#include <linux/irq.h>
29#include <linux/pm_runtime.h>
29 30
30#include "wl1251.h" 31#include "wl1251.h"
31 32
@@ -42,8 +43,6 @@ struct wl1251_sdio {
42 u32 elp_val; 43 u32 elp_val;
43}; 44};
44 45
45static struct wl12xx_platform_data *wl12xx_board_data;
46
47static struct sdio_func *wl_to_func(struct wl1251 *wl) 46static struct sdio_func *wl_to_func(struct wl1251 *wl)
48{ 47{
49 struct wl1251_sdio *wl_sdio = wl->if_priv; 48 struct wl1251_sdio *wl_sdio = wl->if_priv;
@@ -171,8 +170,42 @@ static void wl1251_disable_line_irq(struct wl1251 *wl)
171 return disable_irq(wl->irq); 170 return disable_irq(wl->irq);
172} 171}
173 172
174static void wl1251_sdio_set_power(bool enable) 173static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
175{ 174{
175 struct sdio_func *func = wl_to_func(wl);
176 int ret;
177
178 if (enable) {
179 /*
 180 * Power is controlled by runtime PM, but we still call the board
 181 * callback in case it wants to do any additional setup, for
 182 * example enabling the clock buffer for the module.
183 */
184 if (wl->set_power)
185 wl->set_power(true);
186
187 ret = pm_runtime_get_sync(&func->dev);
188 if (ret < 0)
189 goto out;
190
191 sdio_claim_host(func);
192 sdio_enable_func(func);
193 sdio_release_host(func);
194 } else {
195 sdio_claim_host(func);
196 sdio_disable_func(func);
197 sdio_release_host(func);
198
199 ret = pm_runtime_put_sync(&func->dev);
200 if (ret < 0)
201 goto out;
202
203 if (wl->set_power)
204 wl->set_power(false);
205 }
206
207out:
208 return ret;
176} 209}
177 210
178static struct wl1251_if_operations wl1251_sdio_ops = { 211static struct wl1251_if_operations wl1251_sdio_ops = {
@@ -181,30 +214,7 @@ static struct wl1251_if_operations wl1251_sdio_ops = {
181 .write_elp = wl1251_sdio_write_elp, 214 .write_elp = wl1251_sdio_write_elp,
182 .read_elp = wl1251_sdio_read_elp, 215 .read_elp = wl1251_sdio_read_elp,
183 .reset = wl1251_sdio_reset, 216 .reset = wl1251_sdio_reset,
184}; 217 .power = wl1251_sdio_set_power,
185
186static int wl1251_platform_probe(struct platform_device *pdev)
187{
188 if (pdev->id != -1) {
189 wl1251_error("can only handle single device");
190 return -ENODEV;
191 }
192
193 wl12xx_board_data = pdev->dev.platform_data;
194 return 0;
195}
196
197/*
198 * Dummy platform_driver for passing platform_data to this driver,
199 * until we have a way to pass this through SDIO subsystem or
200 * some other way.
201 */
202static struct platform_driver wl1251_platform_driver = {
203 .driver = {
204 .name = "wl1251_data",
205 .owner = THIS_MODULE,
206 },
207 .probe = wl1251_platform_probe,
208}; 218};
209 219
210static int wl1251_sdio_probe(struct sdio_func *func, 220static int wl1251_sdio_probe(struct sdio_func *func,
@@ -214,6 +224,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
214 struct wl1251 *wl; 224 struct wl1251 *wl;
215 struct ieee80211_hw *hw; 225 struct ieee80211_hw *hw;
216 struct wl1251_sdio *wl_sdio; 226 struct wl1251_sdio *wl_sdio;
227 const struct wl12xx_platform_data *wl12xx_board_data;
217 228
218 hw = wl1251_alloc_hw(); 229 hw = wl1251_alloc_hw();
219 if (IS_ERR(hw)) 230 if (IS_ERR(hw))
@@ -239,8 +250,8 @@ static int wl1251_sdio_probe(struct sdio_func *func,
239 wl_sdio->func = func; 250 wl_sdio->func = func;
240 wl->if_priv = wl_sdio; 251 wl->if_priv = wl_sdio;
241 wl->if_ops = &wl1251_sdio_ops; 252 wl->if_ops = &wl1251_sdio_ops;
242 wl->set_power = wl1251_sdio_set_power;
243 253
254 wl12xx_board_data = wl12xx_get_platform_data();
244 if (wl12xx_board_data != NULL) { 255 if (wl12xx_board_data != NULL) {
245 wl->set_power = wl12xx_board_data->set_power; 256 wl->set_power = wl12xx_board_data->set_power;
246 wl->irq = wl12xx_board_data->irq; 257 wl->irq = wl12xx_board_data->irq;
@@ -273,6 +284,10 @@ static int wl1251_sdio_probe(struct sdio_func *func,
273 goto out_free_irq; 284 goto out_free_irq;
274 285
275 sdio_set_drvdata(func, wl); 286 sdio_set_drvdata(func, wl);
287
288 /* Tell PM core that we don't need the card to be powered now */
289 pm_runtime_put_noidle(&func->dev);
290
276 return ret; 291 return ret;
277 292
278out_free_irq: 293out_free_irq:
@@ -294,6 +309,9 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
294 struct wl1251 *wl = sdio_get_drvdata(func); 309 struct wl1251 *wl = sdio_get_drvdata(func);
295 struct wl1251_sdio *wl_sdio = wl->if_priv; 310 struct wl1251_sdio *wl_sdio = wl->if_priv;
296 311
 312 /* Undo the pm_runtime_put_noidle() done in wl1251_sdio_probe() */
313 pm_runtime_get_noresume(&func->dev);
314
297 if (wl->irq) 315 if (wl->irq)
298 free_irq(wl->irq, wl); 316 free_irq(wl->irq, wl);
299 kfree(wl_sdio); 317 kfree(wl_sdio);
@@ -305,23 +323,37 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
305 sdio_release_host(func); 323 sdio_release_host(func);
306} 324}
307 325
326static int wl1251_suspend(struct device *dev)
327{
328 /*
329 * Tell MMC/SDIO core it's OK to power down the card
330 * (if it isn't already), but not to remove it completely.
331 */
332 return 0;
333}
334
335static int wl1251_resume(struct device *dev)
336{
337 return 0;
338}
339
340static const struct dev_pm_ops wl1251_sdio_pm_ops = {
341 .suspend = wl1251_suspend,
342 .resume = wl1251_resume,
343};
344
308static struct sdio_driver wl1251_sdio_driver = { 345static struct sdio_driver wl1251_sdio_driver = {
309 .name = "wl1251_sdio", 346 .name = "wl1251_sdio",
310 .id_table = wl1251_devices, 347 .id_table = wl1251_devices,
311 .probe = wl1251_sdio_probe, 348 .probe = wl1251_sdio_probe,
312 .remove = __devexit_p(wl1251_sdio_remove), 349 .remove = __devexit_p(wl1251_sdio_remove),
350 .drv.pm = &wl1251_sdio_pm_ops,
313}; 351};
314 352
315static int __init wl1251_sdio_init(void) 353static int __init wl1251_sdio_init(void)
316{ 354{
317 int err; 355 int err;
318 356
319 err = platform_driver_register(&wl1251_platform_driver);
320 if (err) {
321 wl1251_error("failed to register platform driver: %d", err);
322 return err;
323 }
324
325 err = sdio_register_driver(&wl1251_sdio_driver); 357 err = sdio_register_driver(&wl1251_sdio_driver);
326 if (err) 358 if (err)
327 wl1251_error("failed to register sdio driver: %d", err); 359 wl1251_error("failed to register sdio driver: %d", err);
@@ -331,7 +363,6 @@ static int __init wl1251_sdio_init(void)
331static void __exit wl1251_sdio_exit(void) 363static void __exit wl1251_sdio_exit(void)
332{ 364{
333 sdio_unregister_driver(&wl1251_sdio_driver); 365 sdio_unregister_driver(&wl1251_sdio_driver);
334 platform_driver_unregister(&wl1251_platform_driver);
335 wl1251_notice("unloaded"); 366 wl1251_notice("unloaded");
336} 367}
337 368
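
Note: the wl1251 SDIO glue now relies on runtime PM: probe drops the usage count it starts with (pm_runtime_put_noidle()), remove takes it back (pm_runtime_get_noresume()), and the new power callback brackets sdio_enable_func()/sdio_disable_func() with pm_runtime_get_sync()/pm_runtime_put_sync(). A condensed sketch of the probe/remove balance (allocation, IRQ and mac80211 registration elided):

static int sketch_wl1251_sdio_probe(struct sdio_func *func,
				    const struct sdio_device_id *id)
{
	/* ... alloc hw, hook up if_ops, request irq ... */

	/* the card may now be powered down between uses */
	pm_runtime_put_noidle(&func->dev);
	return 0;
}

static void sketch_wl1251_sdio_remove(struct sdio_func *func)
{
	/* re-take the count dropped in probe before tearing down */
	pm_runtime_get_noresume(&func->dev);

	/* ... free irq, free hw ... */
}
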
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index 88fa8e69d0d..ac872b38960 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -215,12 +215,21 @@ static void wl1251_spi_disable_irq(struct wl1251 *wl)
215 return disable_irq(wl->irq); 215 return disable_irq(wl->irq);
216} 216}
217 217
218static int wl1251_spi_set_power(struct wl1251 *wl, bool enable)
219{
220 if (wl->set_power)
221 wl->set_power(enable);
222
223 return 0;
224}
225
218static const struct wl1251_if_operations wl1251_spi_ops = { 226static const struct wl1251_if_operations wl1251_spi_ops = {
219 .read = wl1251_spi_read, 227 .read = wl1251_spi_read,
220 .write = wl1251_spi_write, 228 .write = wl1251_spi_write,
221 .reset = wl1251_spi_reset_wake, 229 .reset = wl1251_spi_reset_wake,
222 .enable_irq = wl1251_spi_enable_irq, 230 .enable_irq = wl1251_spi_enable_irq,
223 .disable_irq = wl1251_spi_disable_irq, 231 .disable_irq = wl1251_spi_disable_irq,
232 .power = wl1251_spi_set_power,
224}; 233};
225 234
226static int __devinit wl1251_spi_probe(struct spi_device *spi) 235static int __devinit wl1251_spi_probe(struct spi_device *spi)
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index e113d4c1fb3..13fbeeccf60 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -256,6 +256,7 @@ struct wl1251_if_operations {
256 void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len); 256 void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len);
257 void (*read_elp)(struct wl1251 *wl, int addr, u32 *val); 257 void (*read_elp)(struct wl1251 *wl, int addr, u32 *val);
258 void (*write_elp)(struct wl1251 *wl, int addr, u32 val); 258 void (*write_elp)(struct wl1251 *wl, int addr, u32 val);
259 int (*power)(struct wl1251 *wl, bool enable);
259 void (*reset)(struct wl1251 *wl); 260 void (*reset)(struct wl1251 *wl);
260 void (*enable_irq)(struct wl1251 *wl); 261 void (*enable_irq)(struct wl1251 *wl);
261 void (*disable_irq)(struct wl1251 *wl); 262 void (*disable_irq)(struct wl1251 *wl);
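
Note: the new power operation lets the core see bus power failures; wl1251_chip_wakeup() above now aborts when wl1251_power_on() returns an error. A minimal sketch of the two sides, matching the SPI implementation and the main.c hunk above:

/* bus side: SPI simply forwards to the optional board callback */
static int sketch_spi_set_power(struct wl1251 *wl, bool enable)
{
	if (wl->set_power)
		wl->set_power(enable);

	return 0;
}

/* core side: a failed power-up now aborts the wakeup sequence */
static int sketch_chip_wakeup(struct wl1251 *wl)
{
	int ret = wl->if_ops->power(wl, true);

	if (ret < 0)
		return ret;

	msleep(WL1251_POWER_ON_SLEEP);
	wl->if_ops->reset(wl);
	return 0;
}
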
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index b447559f1db..d2adeb1f72b 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -1,46 +1,58 @@
1menuconfig WL12XX 1menuconfig WL12XX_MENU
2 tristate "TI wl12xx driver support" 2 tristate "TI wl12xx driver support"
3 depends on MAC80211 && EXPERIMENTAL 3 depends on MAC80211 && EXPERIMENTAL
4 ---help--- 4 ---help---
5 This will enable TI wl12xx driver support. The drivers make 5 This will enable TI wl12xx driver support for the following chips:
6 use of the mac80211 stack. 6 wl1271 and wl1273.
7 The drivers make use of the mac80211 stack.
7 8
8config WL1271 9config WL12XX
9 tristate "TI wl1271 support" 10 tristate "TI wl12xx support"
10 depends on WL12XX && GENERIC_HARDIRQS 11 depends on WL12XX_MENU && GENERIC_HARDIRQS
11 depends on INET 12 depends on INET
12 select FW_LOADER 13 select FW_LOADER
13 select CRC7 14 select CRC7
14 ---help--- 15 ---help---
15 This module adds support for wireless adapters based on the 16 This module adds support for wireless adapters based on TI wl1271 and
16 TI wl1271 chipset. 17 TI wl1273 chipsets. This module does *not* include support for wl1251.
18 For wl1251 support, use the separate homonymous driver instead.
17 19
18 If you choose to build a module, it'll be called wl1271. Say N if 20 If you choose to build a module, it will be called wl12xx. Say N if
19 unsure. 21 unsure.
20 22
21config WL1271_SPI 23config WL12XX_HT
22 tristate "TI wl1271 SPI support" 24 bool "TI wl12xx 802.11 HT support (EXPERIMENTAL)"
23 depends on WL1271 && SPI_MASTER 25 depends on WL12XX && EXPERIMENTAL
26 default n
27 ---help---
28 This will enable 802.11 HT support in the wl12xx module.
29
 30 This configuration option is temporary because the code is
 31 incomplete and still being tested.
32
33config WL12XX_SPI
34 tristate "TI wl12xx SPI support"
35 depends on WL12XX && SPI_MASTER
24 ---help--- 36 ---help---
25 This module adds support for the SPI interface of adapters using 37 This module adds support for the SPI interface of adapters using
26 TI wl1271 chipset. Select this if your platform is using 38 TI wl12xx chipsets. Select this if your platform is using
27 the SPI bus. 39 the SPI bus.
28 40
29 If you choose to build a module, it'll be called wl1251_spi. 41 If you choose to build a module, it'll be called wl12xx_spi.
30 Say N if unsure. 42 Say N if unsure.
31 43
32config WL1271_SDIO 44config WL12XX_SDIO
33 tristate "TI wl1271 SDIO support" 45 tristate "TI wl12xx SDIO support"
34 depends on WL1271 && MMC 46 depends on WL12XX && MMC
35 ---help--- 47 ---help---
36 This module adds support for the SDIO interface of adapters using 48 This module adds support for the SDIO interface of adapters using
37 TI wl1271 chipset. Select this if your platform is using 49 TI wl12xx chipsets. Select this if your platform is using
38 the SDIO bus. 50 the SDIO bus.
39 51
40 If you choose to build a module, it'll be called 52 If you choose to build a module, it'll be called wl12xx_sdio.
41 wl1271_sdio. Say N if unsure. 53 Say N if unsure.
42 54
43config WL12XX_PLATFORM_DATA 55config WL12XX_PLATFORM_DATA
44 bool 56 bool
45 depends on WL1271_SDIO != n 57 depends on WL12XX_SDIO != n || WL1251_SDIO != n
46 default y 58 default y
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 3a807444b2a..005a758174d 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -1,12 +1,13 @@
1wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \ 1wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
2 wl1271_event.o wl1271_tx.o wl1271_rx.o \ 2 boot.o init.o debugfs.o scan.o
3 wl1271_ps.o wl1271_acx.o wl1271_boot.o \
4 wl1271_init.o wl1271_debugfs.o wl1271_scan.o
5 3
6wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o 4wl12xx_spi-objs = spi.o
7obj-$(CONFIG_WL1271) += wl1271.o 5wl12xx_sdio-objs = sdio.o
8obj-$(CONFIG_WL1271_SPI) += wl1271_spi.o 6
9obj-$(CONFIG_WL1271_SDIO) += wl1271_sdio.o 7wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
8obj-$(CONFIG_WL12XX) += wl12xx.o
9obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o
10obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
10 11
11# small builtin driver bit 12# small builtin driver bit
12obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o 13obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
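
Note: for anyone carrying a defconfig, the Kconfig/Makefile rename maps as follows (illustrative fragment, assuming modular builds; the resulting modules become wl12xx.ko, wl12xx_spi.ko and wl12xx_sdio.ko):

# before this patch                after this patch
CONFIG_WL12XX=m               ->   CONFIG_WL12XX_MENU=m
CONFIG_WL1271=m               ->   CONFIG_WL12XX=m
CONFIG_WL1271_SPI=m           ->   CONFIG_WL12XX_SPI=m
CONFIG_WL1271_SDIO=m          ->   CONFIG_WL12XX_SDIO=m
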
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/acx.c
index 61899340526..7cbaeb6d2a3 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -21,7 +21,7 @@
21 * 21 *
22 */ 22 */
23 23
24#include "wl1271_acx.h" 24#include "acx.h"
25 25
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
@@ -29,10 +29,10 @@
29#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31 31
32#include "wl1271.h" 32#include "wl12xx.h"
33#include "wl12xx_80211.h" 33#include "wl12xx_80211.h"
34#include "wl1271_reg.h" 34#include "reg.h"
35#include "wl1271_ps.h" 35#include "ps.h"
36 36
37int wl1271_acx_wake_up_conditions(struct wl1271 *wl) 37int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
38{ 38{
@@ -862,7 +862,7 @@ out:
862 return ret; 862 return ret;
863} 863}
864 864
865int wl1271_acx_frag_threshold(struct wl1271 *wl) 865int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold)
866{ 866{
867 struct acx_frag_threshold *acx; 867 struct acx_frag_threshold *acx;
868 int ret = 0; 868 int ret = 0;
@@ -876,7 +876,7 @@ int wl1271_acx_frag_threshold(struct wl1271 *wl)
876 goto out; 876 goto out;
877 } 877 }
878 878
879 acx->frag_threshold = cpu_to_le16(wl->conf.tx.frag_threshold); 879 acx->frag_threshold = cpu_to_le16(frag_threshold);
880 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx)); 880 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx));
881 if (ret < 0) { 881 if (ret < 0) {
882 wl1271_warning("Setting of frag threshold failed: %d", ret); 882 wl1271_warning("Setting of frag threshold failed: %d", ret);
@@ -1226,6 +1226,89 @@ out:
1226 return ret; 1226 return ret;
1227} 1227}
1228 1228
1229int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1230 struct ieee80211_sta_ht_cap *ht_cap,
1231 bool allow_ht_operation)
1232{
1233 struct wl1271_acx_ht_capabilities *acx;
1234 u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1235 int ret = 0;
1236
1237 wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
1238
1239 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1240 if (!acx) {
1241 ret = -ENOMEM;
1242 goto out;
1243 }
1244
1245 /* Allow HT Operation ? */
1246 if (allow_ht_operation) {
1247 acx->ht_capabilites =
1248 WL1271_ACX_FW_CAP_HT_OPERATION;
1249 if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
1250 acx->ht_capabilites |=
1251 WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
1252 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
1253 acx->ht_capabilites |=
1254 WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
1255 if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
1256 acx->ht_capabilites |=
1257 WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
1258
1259 /* get data from A-MPDU parameters field */
1260 acx->ampdu_max_length = ht_cap->ampdu_factor;
1261 acx->ampdu_min_spacing = ht_cap->ampdu_density;
1262
1263 memcpy(acx->mac_address, mac_address, ETH_ALEN);
1264 } else { /* HT operations are not allowed */
1265 acx->ht_capabilites = 0;
1266 }
1267
1268 ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
1269 if (ret < 0) {
1270 wl1271_warning("acx ht capabilities setting failed: %d", ret);
1271 goto out;
1272 }
1273
1274out:
1275 kfree(acx);
1276 return ret;
1277}
1278
1279int wl1271_acx_set_ht_information(struct wl1271 *wl,
1280 u16 ht_operation_mode)
1281{
1282 struct wl1271_acx_ht_information *acx;
1283 int ret = 0;
1284
1285 wl1271_debug(DEBUG_ACX, "acx ht information setting");
1286
1287 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1288 if (!acx) {
1289 ret = -ENOMEM;
1290 goto out;
1291 }
1292
1293 acx->ht_protection =
1294 (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
1295 acx->rifs_mode = 0;
1296 acx->gf_protection = 0;
1297 acx->ht_tx_burst_limit = 0;
1298 acx->dual_cts_protection = 0;
1299
1300 ret = wl1271_cmd_configure(wl, ACX_HT_BSS_OPERATION, acx, sizeof(*acx));
1301
1302 if (ret < 0) {
1303 wl1271_warning("acx ht information setting failed: %d", ret);
1304 goto out;
1305 }
1306
1307out:
1308 kfree(acx);
1309 return ret;
1310}
1311
1229int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime) 1312int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
1230{ 1313{
1231 struct wl1271_acx_fw_tsf_information *tsf_info; 1314 struct wl1271_acx_fw_tsf_information *tsf_info;
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/acx.h
index ebb341d36e8..75a6306ff55 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -22,11 +22,11 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_ACX_H__ 25#ifndef __ACX_H__
26#define __WL1271_ACX_H__ 26#define __ACX_H__
27 27
28#include "wl1271.h" 28#include "wl12xx.h"
29#include "wl1271_cmd.h" 29#include "cmd.h"
30 30
31/************************************************************************* 31/*************************************************************************
32 32
@@ -61,7 +61,8 @@
61 WL1271_ACX_INTR_HW_AVAILABLE | \ 61 WL1271_ACX_INTR_HW_AVAILABLE | \
62 WL1271_ACX_INTR_DATA) 62 WL1271_ACX_INTR_DATA)
63 63
64#define WL1271_INTR_MASK (WL1271_ACX_INTR_EVENT_A | \ 64#define WL1271_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
65 WL1271_ACX_INTR_EVENT_A | \
65 WL1271_ACX_INTR_EVENT_B | \ 66 WL1271_ACX_INTR_EVENT_B | \
66 WL1271_ACX_INTR_HW_AVAILABLE | \ 67 WL1271_ACX_INTR_HW_AVAILABLE | \
67 WL1271_ACX_INTR_DATA) 68 WL1271_ACX_INTR_DATA)
@@ -964,6 +965,87 @@ struct wl1271_acx_rssi_snr_avg_weights {
964 u8 snr_data; 965 u8 snr_data;
965}; 966};
966 967
968/*
969 * ACX_PEER_HT_CAP
970 * Configure HT capabilities - declare the capabilities of the peer
971 * we are connected to.
972 */
973struct wl1271_acx_ht_capabilities {
974 struct acx_header header;
975
976 /*
977 * bit 0 - Allow HT Operation
978 * bit 1 - Allow Greenfield format in TX
979 * bit 2 - Allow Short GI in TX
980 * bit 3 - Allow L-SIG TXOP Protection in TX
981 * bit 4 - Allow HT Control fields in TX.
 982	 * Note: the driver will still leave space for HT control in packets,
 983	 * regardless of the value of this field. The FW is responsible for
 984	 * dropping the HT field from any frame when this bit is set to 0.
 985	 * bit 5 - Allow RD initiation in TXOP. FW is allowed to initiate RD.
986 * Exact policy setting for this feature is TBD.
987 * Note, this bit can only be set to 1 if bit 3 is set to 1.
988 */
989 __le32 ht_capabilites;
990
991 /*
992 * Indicates to which peer these capabilities apply.
 993	 * For infrastructure, use ff:ff:ff:ff:ff:ff, which indicates relevance
994 * for all peers.
995 * Only valid for IBSS/DLS operation.
996 */
997 u8 mac_address[ETH_ALEN];
998
999 /*
 1000	 * This is the maximum A-MPDU length supported by the AP. The FW may not
1001 * exceed this length when sending A-MPDUs
1002 */
1003 u8 ampdu_max_length;
1004
 1005	/* This is the minimal spacing required when sending A-MPDUs to the AP */
1006 u8 ampdu_min_spacing;
1007} __packed;
1008
 1009/* HT Capabilities FW Bit Mask Mapping */
1010#define WL1271_ACX_FW_CAP_HT_OPERATION BIT(0)
1011#define WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT BIT(1)
1012#define WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS BIT(2)
1013#define WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION BIT(3)
1014#define WL1271_ACX_FW_CAP_HT_CONTROL_FIELDS BIT(4)
1015#define WL1271_ACX_FW_CAP_RD_INITIATION BIT(5)
1016
1017
1018/*
1019 * ACX_HT_BSS_OPERATION
1020 * Configure HT capabilities - AP rules for behavior in the BSS.
1021 */
1022struct wl1271_acx_ht_information {
1023 struct acx_header header;
1024
1025 /* Values: 0 - RIFS not allowed, 1 - RIFS allowed */
1026 u8 rifs_mode;
1027
 1028	/* Values: 0-3, as defined in the spec */
1029 u8 ht_protection;
1030
1031 /* Values: 0 - GF protection not required, 1 - GF protection required */
1032 u8 gf_protection;
1033
 1034	/* Values: 0 - TX Burst limit not required, 1 - TX Burst Limit required */
1035 u8 ht_tx_burst_limit;
1036
1037 /*
1038 * Values: 0 - Dual CTS protection not required,
1039 * 1 - Dual CTS Protection required
 1040	 * Note: When this value is set to 1, the FW will protect every TXOP with an
 1041	 * RTS frame and will not use CTS-to-self, regardless of the value of the
1042 * ACX_CTS_PROTECTION information element
1043 */
1044 u8 dual_cts_protection;
1045
1046 u8 padding[3];
1047} __packed;
1048
967struct wl1271_acx_fw_tsf_information { 1049struct wl1271_acx_fw_tsf_information {
968 struct acx_header header; 1050 struct acx_header header;
969 1051
@@ -1079,7 +1161,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
1079int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, 1161int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
1080 u8 tsid, u8 ps_scheme, u8 ack_policy, 1162 u8 tsid, u8 ps_scheme, u8 ack_policy,
1081 u32 apsd_conf0, u32 apsd_conf1); 1163 u32 apsd_conf0, u32 apsd_conf1);
1082int wl1271_acx_frag_threshold(struct wl1271 *wl); 1164int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold);
1083int wl1271_acx_tx_config_options(struct wl1271 *wl); 1165int wl1271_acx_tx_config_options(struct wl1271 *wl);
1084int wl1271_acx_mem_cfg(struct wl1271 *wl); 1166int wl1271_acx_mem_cfg(struct wl1271 *wl);
1085int wl1271_acx_init_mem_config(struct wl1271 *wl); 1167int wl1271_acx_init_mem_config(struct wl1271 *wl);
@@ -1093,6 +1175,11 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
1093int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable, 1175int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
1094 s16 thold, u8 hyst); 1176 s16 thold, u8 hyst);
1095int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl); 1177int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
1178int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1179 struct ieee80211_sta_ht_cap *ht_cap,
1180 bool allow_ht_operation);
1181int wl1271_acx_set_ht_information(struct wl1271 *wl,
1182 u16 ht_operation_mode);
1096int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime); 1183int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
1097 1184
1098#endif /* __WL1271_ACX_H__ */ 1185#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/boot.c
index b9102124209..1eafb817583 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -24,11 +24,11 @@
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26 26
27#include "wl1271_acx.h" 27#include "acx.h"
28#include "wl1271_reg.h" 28#include "reg.h"
29#include "wl1271_boot.h" 29#include "boot.h"
30#include "wl1271_io.h" 30#include "io.h"
31#include "wl1271_event.h" 31#include "event.h"
32 32
33static struct wl1271_partition_set part_table[PART_TABLE_LEN] = { 33static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
34 [PART_DOWN] = { 34 [PART_DOWN] = {
@@ -471,20 +471,19 @@ int wl1271_boot(struct wl1271 *wl)
471{ 471{
472 int ret = 0; 472 int ret = 0;
473 u32 tmp, clk, pause; 473 u32 tmp, clk, pause;
474 int ref_clock = wl->ref_clock;
475 474
476 wl1271_boot_hw_version(wl); 475 wl1271_boot_hw_version(wl);
477 476
478 if (ref_clock == 0 || ref_clock == 2 || ref_clock == 4) 477 if (wl->ref_clock == 0 || wl->ref_clock == 2 || wl->ref_clock == 4)
479 /* ref clk: 19.2/38.4/38.4-XTAL */ 478 /* ref clk: 19.2/38.4/38.4-XTAL */
480 clk = 0x3; 479 clk = 0x3;
481 else if (ref_clock == 1 || ref_clock == 3) 480 else if (wl->ref_clock == 1 || wl->ref_clock == 3)
482 /* ref clk: 26/52 */ 481 /* ref clk: 26/52 */
483 clk = 0x5; 482 clk = 0x5;
484 else 483 else
485 return -EINVAL; 484 return -EINVAL;
486 485
487 if (ref_clock != 0) { 486 if (wl->ref_clock != 0) {
488 u16 val; 487 u16 val;
489 /* Set clock type (open drain) */ 488 /* Set clock type (open drain) */
490 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE); 489 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
@@ -529,8 +528,7 @@ int wl1271_boot(struct wl1271 *wl)
529 528
530 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); 529 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
531 530
532 /* 2 */ 531 clk |= (wl->ref_clock << 1) << 4;
533 clk |= (ref_clock << 1) << 4;
534 wl1271_write32(wl, DRPW_SCRATCH_START, clk); 532 wl1271_write32(wl, DRPW_SCRATCH_START, clk);
535 533
536 wl1271_set_partition(wl, &part_table[PART_WORK]); 534 wl1271_set_partition(wl, &part_table[PART_WORK]);
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/boot.h
index f73b0b15a28..c7d771959f3 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/boot.h
@@ -24,7 +24,7 @@
24#ifndef __BOOT_H__ 24#ifndef __BOOT_H__
25#define __BOOT_H__ 25#define __BOOT_H__
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28 28
29int wl1271_boot(struct wl1271 *wl); 29int wl1271_boot(struct wl1271 *wl);
30 30
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 5d3e8485ea4..f3d0541aaad 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -29,13 +29,13 @@
29#include <linux/ieee80211.h> 29#include <linux/ieee80211.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31 31
32#include "wl1271.h" 32#include "wl12xx.h"
33#include "wl1271_reg.h" 33#include "reg.h"
34#include "wl1271_io.h" 34#include "io.h"
35#include "wl1271_acx.h" 35#include "acx.h"
36#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
37#include "wl1271_cmd.h" 37#include "cmd.h"
38#include "wl1271_event.h" 38#include "event.h"
39 39
40#define WL1271_CMD_FAST_POLL_COUNT 50 40#define WL1271_CMD_FAST_POLL_COUNT 50
41 41
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index a0caf4fc37b..16d1bf814e7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -22,10 +22,10 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_CMD_H__ 25#ifndef __CMD_H__
26#define __WL1271_CMD_H__ 26#define __CMD_H__
27 27
28#include "wl1271.h" 28#include "wl12xx.h"
29 29
30struct acx_header; 30struct acx_header;
31 31
@@ -327,9 +327,6 @@ enum wl1271_channel_tune_bands {
327 327
328#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0 328#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0
329 329
330#define TEST_CMD_P2G_CAL 0x02
331#define TEST_CMD_CHANNEL_TUNE 0x0d
332#define TEST_CMD_UPDATE_PD_REFERENCE_POINT 0x1d
333#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19 330#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19
334#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E 331#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E
335#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26 332#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26
@@ -375,51 +372,6 @@ struct wl1271_ext_radio_parms_cmd {
375 u8 padding[3]; 372 u8 padding[3];
376} __packed; 373} __packed;
377 374
378struct wl1271_cmd_cal_channel_tune {
379 struct wl1271_cmd_header header;
380
381 struct wl1271_cmd_test_header test;
382
383 u8 band;
384 u8 channel;
385
386 __le16 radio_status;
387} __packed;
388
389struct wl1271_cmd_cal_update_ref_point {
390 struct wl1271_cmd_header header;
391
392 struct wl1271_cmd_test_header test;
393
394 __le32 ref_power;
395 __le32 ref_detector;
396 u8 sub_band;
397 u8 padding[3];
398} __packed;
399
400#define MAX_TLV_LENGTH 400
401#define MAX_NVS_VERSION_LENGTH 12
402
403#define WL1271_CAL_P2G_BAND_B_G BIT(0)
404
405struct wl1271_cmd_cal_p2g {
406 struct wl1271_cmd_header header;
407
408 struct wl1271_cmd_test_header test;
409
410 __le16 len;
411 u8 buf[MAX_TLV_LENGTH];
412 u8 type;
413 u8 padding;
414
415 __le16 radio_status;
416 u8 nvs_version[MAX_NVS_VERSION_LENGTH];
417
418 u8 sub_band_mask;
419 u8 padding2;
420} __packed;
421
422
423/* 375/*
424 * There are three types of disconnections: 376 * There are three types of disconnections:
425 * 377 *
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/conf.h
index 5f78a6cb143..a16b3616e43 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -21,8 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_CONF_H__ 24#ifndef __CONF_H__
25#define __WL1271_CONF_H__ 25#define __CONF_H__
26 26
27enum { 27enum {
28 CONF_HW_BIT_RATE_1MBPS = BIT(0), 28 CONF_HW_BIT_RATE_1MBPS = BIT(0),
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index 66c2b90ddfd..dd71b7d2105 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -21,31 +21,42 @@
21 * 21 *
22 */ 22 */
23 23
24#include "wl1271_debugfs.h" 24#include "debugfs.h"
25 25
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29#include "wl1271.h" 29#include "wl12xx.h"
30#include "wl1271_acx.h" 30#include "acx.h"
31#include "wl1271_ps.h" 31#include "ps.h"
32#include "wl1271_io.h" 32#include "io.h"
33 33
34/* ms */ 34/* ms */
35#define WL1271_DEBUGFS_STATS_LIFETIME 1000 35#define WL1271_DEBUGFS_STATS_LIFETIME 1000
36 36
37/* debugfs macros idea from mac80211 */ 37/* debugfs macros idea from mac80211 */
38#define DEBUGFS_FORMAT_BUFFER_SIZE 100
39static int wl1271_format_buffer(char __user *userbuf, size_t count,
40 loff_t *ppos, char *fmt, ...)
41{
42 va_list args;
43 char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
44 int res;
38 45
39#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ 46 va_start(args, fmt);
47 res = vscnprintf(buf, sizeof(buf), fmt, args);
48 va_end(args);
49
50 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
51}
52
53#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
40static ssize_t name## _read(struct file *file, char __user *userbuf, \ 54static ssize_t name## _read(struct file *file, char __user *userbuf, \
41 size_t count, loff_t *ppos) \ 55 size_t count, loff_t *ppos) \
42{ \ 56{ \
43 struct wl1271 *wl = file->private_data; \ 57 struct wl1271 *wl = file->private_data; \
44 char buf[buflen]; \ 58 return wl1271_format_buffer(userbuf, count, ppos, \
45 int res; \ 59 fmt "\n", ##value); \
46 \
47 res = scnprintf(buf, buflen, fmt "\n", ##value); \
48 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
49} \ 60} \
50 \ 61 \
51static const struct file_operations name## _ops = { \ 62static const struct file_operations name## _ops = { \
@@ -69,20 +80,17 @@ static const struct file_operations name## _ops = { \
69 wl->debugfs.name = NULL; \ 80 wl->debugfs.name = NULL; \
70 } while (0) 81 } while (0)
71 82
72#define DEBUGFS_FWSTATS_FILE(sub, name, buflen, fmt) \ 83#define DEBUGFS_FWSTATS_FILE(sub, name, fmt) \
73static ssize_t sub## _ ##name## _read(struct file *file, \ 84static ssize_t sub## _ ##name## _read(struct file *file, \
74 char __user *userbuf, \ 85 char __user *userbuf, \
75 size_t count, loff_t *ppos) \ 86 size_t count, loff_t *ppos) \
76{ \ 87{ \
77 struct wl1271 *wl = file->private_data; \ 88 struct wl1271 *wl = file->private_data; \
78 char buf[buflen]; \
79 int res; \
80 \ 89 \
81 wl1271_debugfs_update_stats(wl); \ 90 wl1271_debugfs_update_stats(wl); \
82 \ 91 \
83 res = scnprintf(buf, buflen, fmt "\n", \ 92 return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \
84 wl->stats.fw_stats->sub.name); \ 93 wl->stats.fw_stats->sub.name); \
85 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
86} \ 94} \
87 \ 95 \
88static const struct file_operations sub## _ ##name## _ops = { \ 96static const struct file_operations sub## _ ##name## _ops = { \
@@ -126,100 +134,99 @@ static int wl1271_open_file_generic(struct inode *inode, struct file *file)
126 return 0; 134 return 0;
127} 135}
128 136
129DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, 20, "%u"); 137DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
130 138
131DEBUGFS_FWSTATS_FILE(rx, out_of_mem, 20, "%u"); 139DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
132DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, 20, "%u"); 140DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
133DEBUGFS_FWSTATS_FILE(rx, hw_stuck, 20, "%u"); 141DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
134DEBUGFS_FWSTATS_FILE(rx, dropped, 20, "%u"); 142DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
135DEBUGFS_FWSTATS_FILE(rx, fcs_err, 20, "%u"); 143DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
136DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, 20, "%u"); 144DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
137DEBUGFS_FWSTATS_FILE(rx, path_reset, 20, "%u"); 145DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
138DEBUGFS_FWSTATS_FILE(rx, reset_counter, 20, "%u"); 146DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
139 147
140DEBUGFS_FWSTATS_FILE(dma, rx_requested, 20, "%u"); 148DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
141DEBUGFS_FWSTATS_FILE(dma, rx_errors, 20, "%u"); 149DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
142DEBUGFS_FWSTATS_FILE(dma, tx_requested, 20, "%u"); 150DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
143DEBUGFS_FWSTATS_FILE(dma, tx_errors, 20, "%u"); 151DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
144 152
145DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, 20, "%u"); 153DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
146DEBUGFS_FWSTATS_FILE(isr, fiqs, 20, "%u"); 154DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
147DEBUGFS_FWSTATS_FILE(isr, rx_headers, 20, "%u"); 155DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
148DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, 20, "%u"); 156DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
149DEBUGFS_FWSTATS_FILE(isr, rx_rdys, 20, "%u"); 157DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
150DEBUGFS_FWSTATS_FILE(isr, irqs, 20, "%u"); 158DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
151DEBUGFS_FWSTATS_FILE(isr, tx_procs, 20, "%u"); 159DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
152DEBUGFS_FWSTATS_FILE(isr, decrypt_done, 20, "%u"); 160DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
153DEBUGFS_FWSTATS_FILE(isr, dma0_done, 20, "%u"); 161DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
154DEBUGFS_FWSTATS_FILE(isr, dma1_done, 20, "%u"); 162DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
155DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, 20, "%u"); 163DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
156DEBUGFS_FWSTATS_FILE(isr, commands, 20, "%u"); 164DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
157DEBUGFS_FWSTATS_FILE(isr, rx_procs, 20, "%u"); 165DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
158DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, 20, "%u"); 166DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
159DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, 20, "%u"); 167DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
160DEBUGFS_FWSTATS_FILE(isr, pci_pm, 20, "%u"); 168DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
161DEBUGFS_FWSTATS_FILE(isr, wakeups, 20, "%u"); 169DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
162DEBUGFS_FWSTATS_FILE(isr, low_rssi, 20, "%u"); 170DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
163 171
164DEBUGFS_FWSTATS_FILE(wep, addr_key_count, 20, "%u"); 172DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
165DEBUGFS_FWSTATS_FILE(wep, default_key_count, 20, "%u"); 173DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
166/* skipping wep.reserved */ 174/* skipping wep.reserved */
167DEBUGFS_FWSTATS_FILE(wep, key_not_found, 20, "%u"); 175DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
168DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, 20, "%u"); 176DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
169DEBUGFS_FWSTATS_FILE(wep, packets, 20, "%u"); 177DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
170DEBUGFS_FWSTATS_FILE(wep, interrupt, 20, "%u"); 178DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
171 179
172DEBUGFS_FWSTATS_FILE(pwr, ps_enter, 20, "%u"); 180DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
173DEBUGFS_FWSTATS_FILE(pwr, elp_enter, 20, "%u"); 181DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
174DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, 20, "%u"); 182DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
175DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, 20, "%u"); 183DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
176DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, 20, "%u"); 184DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
177DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, 20, "%u"); 185DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
178DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, 20, "%u"); 186DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
179DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, 20, "%u"); 187DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
180DEBUGFS_FWSTATS_FILE(pwr, power_save_off, 20, "%u"); 188DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
181DEBUGFS_FWSTATS_FILE(pwr, enable_ps, 20, "%u"); 189DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
182DEBUGFS_FWSTATS_FILE(pwr, disable_ps, 20, "%u"); 190DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
183DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, 20, "%u"); 191DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
184/* skipping cont_miss_bcns_spread for now */ 192/* skipping cont_miss_bcns_spread for now */
185DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, 20, "%u"); 193DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
186 194
187DEBUGFS_FWSTATS_FILE(mic, rx_pkts, 20, "%u"); 195DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
188DEBUGFS_FWSTATS_FILE(mic, calc_failure, 20, "%u"); 196DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
189 197
190DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, 20, "%u"); 198DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
191DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, 20, "%u"); 199DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
192DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, 20, "%u"); 200DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
193DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, 20, "%u"); 201DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
194DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, 20, "%u"); 202DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
195DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, 20, "%u"); 203DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
196 204
197DEBUGFS_FWSTATS_FILE(event, heart_beat, 20, "%u"); 205DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
198DEBUGFS_FWSTATS_FILE(event, calibration, 20, "%u"); 206DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
199DEBUGFS_FWSTATS_FILE(event, rx_mismatch, 20, "%u"); 207DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
200DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, 20, "%u"); 208DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
201DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u"); 209DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
202DEBUGFS_FWSTATS_FILE(event, oom_late, 20, "%u"); 210DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
203DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, 20, "%u"); 211DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
204DEBUGFS_FWSTATS_FILE(event, tx_stuck, 20, "%u"); 212DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
205 213
206DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, 20, "%u"); 214DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
207DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, 20, "%u"); 215DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
208DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, 20, "%u"); 216DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
209DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, 20, "%u"); 217DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
210DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, 20, "%u"); 218DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
211DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, 20, "%u"); 219DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
212DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, 20, "%u"); 220DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
213 221
214DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, 20, "%u"); 222DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
215DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, 20, "%u"); 223DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
216DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, 224DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, "%u");
217 20, "%u"); 225DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
218DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, 20, "%u"); 226DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
219DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, 20, "%u"); 227
220 228DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count);
221DEBUGFS_READONLY_FILE(retry_count, 20, "%u", wl->stats.retry_count); 229DEBUGFS_READONLY_FILE(excessive_retries, "%u",
222DEBUGFS_READONLY_FILE(excessive_retries, 20, "%u",
223 wl->stats.excessive_retries); 230 wl->stats.excessive_retries);
224 231
225static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf, 232static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.h b/drivers/net/wireless/wl12xx/debugfs.h
index 00a45b2669a..254c5b292cf 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.h
+++ b/drivers/net/wireless/wl12xx/debugfs.h
@@ -21,10 +21,10 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef WL1271_DEBUGFS_H 24#ifndef __DEBUGFS_H__
25#define WL1271_DEBUGFS_H 25#define __DEBUGFS_H__
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28 28
29int wl1271_debugfs_init(struct wl1271 *wl); 29int wl1271_debugfs_init(struct wl1271 *wl);
30void wl1271_debugfs_exit(struct wl1271 *wl); 30void wl1271_debugfs_exit(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/event.c
index 7b3f5038296..f9146f5242f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -21,12 +21,12 @@
21 * 21 *
22 */ 22 */
23 23
24#include "wl1271.h" 24#include "wl12xx.h"
25#include "wl1271_reg.h" 25#include "reg.h"
26#include "wl1271_io.h" 26#include "io.h"
27#include "wl1271_event.h" 27#include "event.h"
28#include "wl1271_ps.h" 28#include "ps.h"
29#include "wl1271_scan.h" 29#include "scan.h"
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31 31
32void wl1271_pspoll_work(struct work_struct *work) 32void wl1271_pspoll_work(struct work_struct *work)
@@ -134,8 +134,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
134 134
135 /* go to extremely low power mode */ 135 /* go to extremely low power mode */
136 wl1271_ps_elp_sleep(wl); 136 wl1271_ps_elp_sleep(wl);
137 if (ret < 0)
138 break;
139 break; 137 break;
140 case EVENT_EXIT_POWER_SAVE_FAIL: 138 case EVENT_EXIT_POWER_SAVE_FAIL:
141 wl1271_debug(DEBUG_PSM, "PSM exit failed"); 139 wl1271_debug(DEBUG_PSM, "PSM exit failed");
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/event.h
index e4751667cf5..6cce0143adb 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_EVENT_H__ 25#ifndef __EVENT_H__
26#define __WL1271_EVENT_H__ 26#define __EVENT_H__
27 27
28/* 28/*
29 * Mbox events 29 * Mbox events
diff --git a/drivers/net/wireless/wl12xx/wl1271_ini.h b/drivers/net/wireless/wl12xx/ini.h
index 2313047d401..c330a2583df 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ini.h
+++ b/drivers/net/wireless/wl12xx/ini.h
@@ -21,8 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_INI_H__ 24#ifndef __INI_H__
25#define __WL1271_INI_H__ 25#define __INI_H__
26 26
27#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16 27#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16
28 28
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/init.c
index 8044bba70ee..7949d346aad 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -25,11 +25,11 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27 27
28#include "wl1271_init.h" 28#include "init.h"
29#include "wl12xx_80211.h" 29#include "wl12xx_80211.h"
30#include "wl1271_acx.h" 30#include "acx.h"
31#include "wl1271_cmd.h" 31#include "cmd.h"
32#include "wl1271_reg.h" 32#include "reg.h"
33 33
34static int wl1271_init_hwenc_config(struct wl1271 *wl) 34static int wl1271_init_hwenc_config(struct wl1271 *wl)
35{ 35{
@@ -290,7 +290,7 @@ int wl1271_hw_init(struct wl1271 *wl)
290 goto out_free_memmap; 290 goto out_free_memmap;
291 291
292 /* Default fragmentation threshold */ 292 /* Default fragmentation threshold */
293 ret = wl1271_acx_frag_threshold(wl); 293 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
294 if (ret < 0) 294 if (ret < 0)
295 goto out_free_memmap; 295 goto out_free_memmap;
296 296
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/init.h
index bc26f8c53b9..7762421f860 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -21,10 +21,10 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_INIT_H__ 24#ifndef __INIT_H__
25#define __WL1271_INIT_H__ 25#define __INIT_H__
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28 28
29int wl1271_hw_init_power_auth(struct wl1271 *wl); 29int wl1271_hw_init_power_auth(struct wl1271 *wl);
30int wl1271_init_templates_config(struct wl1271 *wl); 30int wl1271_init_templates_config(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/io.c
index c8759acef13..35c2f1aca6b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -26,9 +26,9 @@
26#include <linux/crc7.h> 26#include <linux/crc7.h>
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28 28
29#include "wl1271.h" 29#include "wl12xx.h"
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31#include "wl1271_io.h" 31#include "io.h"
32 32
33#define OCP_CMD_LOOP 32 33#define OCP_CMD_LOOP 32
34 34
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/io.h
index c1f92e65ded..844b32b170b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -22,10 +22,10 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_IO_H__ 25#ifndef __IO_H__
26#define __WL1271_IO_H__ 26#define __IO_H__
27 27
28#include "wl1271_reg.h" 28#include "reg.h"
29 29
30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
31 31
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/main.c
index 48a4b9961ae..708ffe304c6 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -31,20 +31,20 @@
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33 33
34#include "wl1271.h" 34#include "wl12xx.h"
35#include "wl12xx_80211.h" 35#include "wl12xx_80211.h"
36#include "wl1271_reg.h" 36#include "reg.h"
37#include "wl1271_io.h" 37#include "io.h"
38#include "wl1271_event.h" 38#include "event.h"
39#include "wl1271_tx.h" 39#include "tx.h"
40#include "wl1271_rx.h" 40#include "rx.h"
41#include "wl1271_ps.h" 41#include "ps.h"
42#include "wl1271_init.h" 42#include "init.h"
43#include "wl1271_debugfs.h" 43#include "debugfs.h"
44#include "wl1271_cmd.h" 44#include "cmd.h"
45#include "wl1271_boot.h" 45#include "boot.h"
46#include "wl1271_testmode.h" 46#include "testmode.h"
47#include "wl1271_scan.h" 47#include "scan.h"
48 48
49#define WL1271_BOOT_RETRIES 3 49#define WL1271_BOOT_RETRIES 3
50 50
@@ -335,6 +335,27 @@ out:
335 return NOTIFY_OK; 335 return NOTIFY_OK;
336} 336}
337 337
338static int wl1271_reg_notify(struct wiphy *wiphy,
339 struct regulatory_request *request) {
340 struct ieee80211_supported_band *band;
341 struct ieee80211_channel *ch;
342 int i;
343
344 band = wiphy->bands[IEEE80211_BAND_5GHZ];
345 for (i = 0; i < band->n_channels; i++) {
346 ch = &band->channels[i];
347 if (ch->flags & IEEE80211_CHAN_DISABLED)
348 continue;
349
350 if (ch->flags & IEEE80211_CHAN_RADAR)
351 ch->flags |= IEEE80211_CHAN_NO_IBSS |
352 IEEE80211_CHAN_PASSIVE_SCAN;
353
354 }
355
356 return 0;
357}
358
338static void wl1271_conf_init(struct wl1271 *wl) 359static void wl1271_conf_init(struct wl1271 *wl)
339{ 360{
340 361
@@ -404,7 +425,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
404 goto out_free_memmap; 425 goto out_free_memmap;
405 426
406 /* Default fragmentation threshold */ 427 /* Default fragmentation threshold */
407 ret = wl1271_acx_frag_threshold(wl); 428 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
408 if (ret < 0) 429 if (ret < 0)
409 goto out_free_memmap; 430 goto out_free_memmap;
410 431
@@ -481,9 +502,9 @@ static void wl1271_fw_status(struct wl1271 *wl,
481 total += cnt; 502 total += cnt;
482 } 503 }
483 504
484 /* if more blocks are available now, schedule some tx work */ 505 /* if more blocks are available now, tx work can be scheduled */
485 if (total && !skb_queue_empty(&wl->tx_queue)) 506 if (total)
486 ieee80211_queue_work(wl->hw, &wl->tx_work); 507 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
487 508
488 /* update the host-chipset time offset */ 509 /* update the host-chipset time offset */
489 getnstimeofday(&ts); 510 getnstimeofday(&ts);
@@ -529,6 +550,15 @@ static void wl1271_irq_work(struct work_struct *work)
529 550
530 intr &= WL1271_INTR_MASK; 551 intr &= WL1271_INTR_MASK;
531 552
553 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
554 wl1271_error("watchdog interrupt received! "
555 "starting recovery.");
556 ieee80211_queue_work(wl->hw, &wl->recovery_work);
557
558 /* restarting the chip. ignore any other interrupt. */
559 goto out;
560 }
561
532 if (intr & WL1271_ACX_INTR_DATA) { 562 if (intr & WL1271_ACX_INTR_DATA) {
533 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 563 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
534 564
@@ -537,6 +567,16 @@ static void wl1271_irq_work(struct work_struct *work)
537 (wl->tx_results_count & 0xff)) 567 (wl->tx_results_count & 0xff))
538 wl1271_tx_complete(wl); 568 wl1271_tx_complete(wl);
539 569
570 /* Check if any tx blocks were freed */
571 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
572 !skb_queue_empty(&wl->tx_queue)) {
573 /*
574 * In order to avoid starvation of the TX path,
575 * call the work function directly.
576 */
577 wl1271_tx_work_locked(wl);
578 }
579
540 wl1271_rx(wl, wl->fw_status); 580 wl1271_rx(wl, wl->fw_status);
541 } 581 }
542 582
@@ -851,12 +891,32 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
851 struct ieee80211_sta *sta = txinfo->control.sta; 891 struct ieee80211_sta *sta = txinfo->control.sta;
852 unsigned long flags; 892 unsigned long flags;
853 893
854 /* peek into the rates configured in the STA entry */ 894 /*
895 * peek into the rates configured in the STA entry.
 896	 * The rates are set after the connection stage. The first block handles
 897	 * only the BG rates: the comparison covers bits 0-16 of sta_rate_set.
 898	 * The second block adds the HT rates when HT is supported.
899 */
855 spin_lock_irqsave(&wl->wl_lock, flags); 900 spin_lock_irqsave(&wl->wl_lock, flags);
856 if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) { 901 if (sta &&
902 (sta->supp_rates[conf->channel->band] !=
903 (wl->sta_rate_set & HW_BG_RATES_MASK))) {
857 wl->sta_rate_set = sta->supp_rates[conf->channel->band]; 904 wl->sta_rate_set = sta->supp_rates[conf->channel->band];
858 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags); 905 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
859 } 906 }
907
908#ifdef CONFIG_WL12XX_HT
909 if (sta &&
910 sta->ht_cap.ht_supported &&
911 ((wl->sta_rate_set >> HW_HT_RATES_OFFSET) !=
912 sta->ht_cap.mcs.rx_mask[0])) {
913 /* Clean MCS bits before setting them */
914 wl->sta_rate_set &= HW_BG_RATES_MASK;
915 wl->sta_rate_set |=
916 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
917 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
918 }
919#endif
860 spin_unlock_irqrestore(&wl->wl_lock, flags); 920 spin_unlock_irqrestore(&wl->wl_lock, flags);
861 921
862 /* queue the packet */ 922 /* queue the packet */
@@ -867,7 +927,8 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
867 * before that, the tx_work will not be initialized! 927 * before that, the tx_work will not be initialized!
868 */ 928 */
869 929
870 ieee80211_queue_work(wl->hw, &wl->tx_work); 930 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
931 ieee80211_queue_work(wl->hw, &wl->tx_work);
871 932
872 /* 933 /*
873 * The workqueue is slow to process the tx_queue and we need stop 934 * The workqueue is slow to process the tx_queue and we need stop
@@ -919,18 +980,19 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
919 struct wiphy *wiphy = hw->wiphy; 980 struct wiphy *wiphy = hw->wiphy;
920 int retries = WL1271_BOOT_RETRIES; 981 int retries = WL1271_BOOT_RETRIES;
921 int ret = 0; 982 int ret = 0;
983 bool booted = false;
922 984
923 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 985 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
924 vif->type, vif->addr); 986 vif->type, vif->addr);
925 987
926 mutex_lock(&wl->mutex); 988 mutex_lock(&wl->mutex);
927 if (wl->vif) { 989 if (wl->vif) {
990 wl1271_debug(DEBUG_MAC80211,
991 "multiple vifs are not supported yet");
928 ret = -EBUSY; 992 ret = -EBUSY;
929 goto out; 993 goto out;
930 } 994 }
931 995
932 wl->vif = vif;
933
934 switch (vif->type) { 996 switch (vif->type) {
935 case NL80211_IFTYPE_STATION: 997 case NL80211_IFTYPE_STATION:
936 wl->bss_type = BSS_TYPE_STA_BSS; 998 wl->bss_type = BSS_TYPE_STA_BSS;
@@ -968,15 +1030,8 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
968 if (ret < 0) 1030 if (ret < 0)
969 goto irq_disable; 1031 goto irq_disable;
970 1032
971 wl->state = WL1271_STATE_ON; 1033 booted = true;
972 wl1271_info("firmware booted (%s)", wl->chip.fw_ver); 1034 break;
973
974 /* update hw/fw version info in wiphy struct */
975 wiphy->hw_version = wl->chip.id;
976 strncpy(wiphy->fw_version, wl->chip.fw_ver,
977 sizeof(wiphy->fw_version));
978
979 goto out;
980 1035
981irq_disable: 1036irq_disable:
982 wl1271_disable_interrupts(wl); 1037 wl1271_disable_interrupts(wl);
@@ -994,8 +1049,21 @@ power_off:
994 wl1271_power_off(wl); 1049 wl1271_power_off(wl);
995 } 1050 }
996 1051
997 wl1271_error("firmware boot failed despite %d retries", 1052 if (!booted) {
998 WL1271_BOOT_RETRIES); 1053 wl1271_error("firmware boot failed despite %d retries",
1054 WL1271_BOOT_RETRIES);
1055 goto out;
1056 }
1057
1058 wl->vif = vif;
1059 wl->state = WL1271_STATE_ON;
1060 wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
1061
1062 /* update hw/fw version info in wiphy struct */
1063 wiphy->hw_version = wl->chip.id;
1064 strncpy(wiphy->fw_version, wl->chip.fw_ver,
1065 sizeof(wiphy->fw_version));
1066
999out: 1067out:
1000 mutex_unlock(&wl->mutex); 1068 mutex_unlock(&wl->mutex);
1001 1069
@@ -1025,6 +1093,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
1025 wl->scan.state = WL1271_SCAN_STATE_IDLE; 1093 wl->scan.state = WL1271_SCAN_STATE_IDLE;
1026 kfree(wl->scan.scanned_ch); 1094 kfree(wl->scan.scanned_ch);
1027 wl->scan.scanned_ch = NULL; 1095 wl->scan.scanned_ch = NULL;
1096 wl->scan.req = NULL;
1028 ieee80211_scan_completed(wl->hw, true); 1097 ieee80211_scan_completed(wl->hw, true);
1029 } 1098 }
1030 1099
@@ -1312,8 +1381,10 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1312 1381
1313 mutex_lock(&wl->mutex); 1382 mutex_lock(&wl->mutex);
1314 1383
1315 if (unlikely(wl->state == WL1271_STATE_OFF)) 1384 if (unlikely(wl->state == WL1271_STATE_OFF)) {
1385 ret = -EAGAIN;
1316 goto out; 1386 goto out;
1387 }
1317 1388
1318 ret = wl1271_ps_elp_wakeup(wl, false); 1389 ret = wl1271_ps_elp_wakeup(wl, false);
1319 if (ret < 0) 1390 if (ret < 0)
@@ -1536,6 +1607,11 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1536 1607
1537 mutex_lock(&wl->mutex); 1608 mutex_lock(&wl->mutex);
1538 1609
1610 if (unlikely(wl->state == WL1271_STATE_OFF)) {
1611 ret = -EAGAIN;
1612 goto out_unlock;
1613 }
1614
1539 ret = wl1271_ps_elp_wakeup(wl, false); 1615 ret = wl1271_ps_elp_wakeup(wl, false);
1540 if (ret < 0) 1616 if (ret < 0)
1541 goto out_unlock; 1617 goto out_unlock;
@@ -1645,6 +1721,16 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
1645 1721
1646 mutex_lock(&wl->mutex); 1722 mutex_lock(&wl->mutex);
1647 1723
1724 if (wl->state == WL1271_STATE_OFF) {
1725 /*
1726 * We cannot return -EBUSY here because cfg80211 will expect
 1727	 * a call to ieee80211_scan_completed if we do, and in this case
 1728	 * there won't be one.
1729 */
1730 ret = -EAGAIN;
1731 goto out;
1732 }
1733
1648 ret = wl1271_ps_elp_wakeup(wl, false); 1734 ret = wl1271_ps_elp_wakeup(wl, false);
1649 if (ret < 0) 1735 if (ret < 0)
1650 goto out; 1736 goto out;
@@ -1659,6 +1745,34 @@ out:
1659 return ret; 1745 return ret;
1660} 1746}
1661 1747
1748static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
1749{
1750 struct wl1271 *wl = hw->priv;
1751 int ret = 0;
1752
1753 mutex_lock(&wl->mutex);
1754
1755 if (unlikely(wl->state == WL1271_STATE_OFF)) {
1756 ret = -EAGAIN;
1757 goto out;
1758 }
1759
1760 ret = wl1271_ps_elp_wakeup(wl, false);
1761 if (ret < 0)
1762 goto out;
1763
1764 ret = wl1271_acx_frag_threshold(wl, (u16)value);
1765 if (ret < 0)
1766 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
1767
1768 wl1271_ps_elp_sleep(wl);
1769
1770out:
1771 mutex_unlock(&wl->mutex);
1772
1773 return ret;
1774}
1775
1662static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 1776static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1663{ 1777{
1664 struct wl1271 *wl = hw->priv; 1778 struct wl1271 *wl = hw->priv;
@@ -1666,8 +1780,10 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1666 1780
1667 mutex_lock(&wl->mutex); 1781 mutex_lock(&wl->mutex);
1668 1782
1669 if (unlikely(wl->state == WL1271_STATE_OFF)) 1783 if (unlikely(wl->state == WL1271_STATE_OFF)) {
1784 ret = -EAGAIN;
1670 goto out; 1785 goto out;
1786 }
1671 1787
1672 ret = wl1271_ps_elp_wakeup(wl, false); 1788 ret = wl1271_ps_elp_wakeup(wl, false);
1673 if (ret < 0) 1789 if (ret < 0)
@@ -1709,6 +1825,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1709{ 1825{
1710 enum wl1271_cmd_ps_mode mode; 1826 enum wl1271_cmd_ps_mode mode;
1711 struct wl1271 *wl = hw->priv; 1827 struct wl1271 *wl = hw->priv;
1828 struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
1712 bool do_join = false; 1829 bool do_join = false;
1713 bool set_assoc = false; 1830 bool set_assoc = false;
1714 int ret; 1831 int ret;
@@ -1717,6 +1834,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1717 1834
1718 mutex_lock(&wl->mutex); 1835 mutex_lock(&wl->mutex);
1719 1836
1837 if (unlikely(wl->state == WL1271_STATE_OFF))
1838 goto out;
1839
1720 ret = wl1271_ps_elp_wakeup(wl, false); 1840 ret = wl1271_ps_elp_wakeup(wl, false);
1721 if (ret < 0) 1841 if (ret < 0)
1722 goto out; 1842 goto out;
@@ -1891,9 +2011,12 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1891 2011
1892 /* Disable the keep-alive feature */ 2012 /* Disable the keep-alive feature */
1893 ret = wl1271_acx_keep_alive_mode(wl, false); 2013 ret = wl1271_acx_keep_alive_mode(wl, false);
1894
1895 if (ret < 0) 2014 if (ret < 0)
1896 goto out_sleep; 2015 goto out_sleep;
2016
2017 /* restore the bssid filter and go to dummy bssid */
2018 wl1271_unjoin(wl);
2019 wl1271_dummy_join(wl);
1897 } 2020 }
1898 2021
1899 } 2022 }
@@ -1927,6 +2050,37 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1927 } 2050 }
1928 } 2051 }
1929 2052
2053 /*
 2054	 * Takes care of: new association with HT enabled,
 2055	 * HT information changes in the beacon.
2056 */
2057 if (sta &&
2058 (changed & BSS_CHANGED_HT) &&
2059 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
2060 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true);
2061 if (ret < 0) {
2062 wl1271_warning("Set ht cap true failed %d", ret);
2063 goto out_sleep;
2064 }
2065 ret = wl1271_acx_set_ht_information(wl,
2066 bss_conf->ht_operation_mode);
2067 if (ret < 0) {
2068 wl1271_warning("Set ht information failed %d", ret);
2069 goto out_sleep;
2070 }
2071 }
2072 /*
 2073	 * Takes care of: new association without HT,
 2074	 * and disassociation.
2075 */
2076 else if (sta && (changed & BSS_CHANGED_ASSOC)) {
2077 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, false);
2078 if (ret < 0) {
2079 wl1271_warning("Set ht cap false failed %d", ret);
2080 goto out_sleep;
2081 }
2082 }
2083
1930 if (changed & BSS_CHANGED_ARP_FILTER) { 2084 if (changed & BSS_CHANGED_ARP_FILTER) {
1931 __be32 addr = bss_conf->arp_addr_list[0]; 2085 __be32 addr = bss_conf->arp_addr_list[0];
1932 WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); 2086 WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
@@ -1966,6 +2120,11 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1966 2120
1967 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue); 2121 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
1968 2122
2123 if (unlikely(wl->state == WL1271_STATE_OFF)) {
2124 ret = -EAGAIN;
2125 goto out;
2126 }
2127
1969 ret = wl1271_ps_elp_wakeup(wl, false); 2128 ret = wl1271_ps_elp_wakeup(wl, false);
1970 if (ret < 0) 2129 if (ret < 0)
1971 goto out; 2130 goto out;
@@ -2009,6 +2168,9 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
2009 2168
2010 mutex_lock(&wl->mutex); 2169 mutex_lock(&wl->mutex);
2011 2170
2171 if (unlikely(wl->state == WL1271_STATE_OFF))
2172 goto out;
2173
2012 ret = wl1271_ps_elp_wakeup(wl, false); 2174 ret = wl1271_ps_elp_wakeup(wl, false);
2013 if (ret < 0) 2175 if (ret < 0)
2014 goto out; 2176 goto out;
@@ -2030,14 +2192,14 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
2030{ 2192{
2031 struct wl1271 *wl = hw->priv; 2193 struct wl1271 *wl = hw->priv;
2032 struct ieee80211_conf *conf = &hw->conf; 2194 struct ieee80211_conf *conf = &hw->conf;
2033 2195
2034 if (idx != 0) 2196 if (idx != 0)
2035 return -ENOENT; 2197 return -ENOENT;
2036 2198
2037 survey->channel = conf->channel; 2199 survey->channel = conf->channel;
2038 survey->filled = SURVEY_INFO_NOISE_DBM; 2200 survey->filled = SURVEY_INFO_NOISE_DBM;
2039 survey->noise = wl->noise; 2201 survey->noise = wl->noise;
2040 2202
2041 return 0; 2203 return 0;
2042} 2204}
2043 2205
@@ -2084,37 +2246,34 @@ static struct ieee80211_rate wl1271_rates[] = {
2084 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, 2246 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
2085}; 2247};
2086 2248
2087/* 2249/* can't be const, mac80211 writes to this */
2088 * Can't be const, mac80211 writes to this. The order of the channels here
2089 * is designed to improve scanning.
2090 */
2091static struct ieee80211_channel wl1271_channels[] = { 2250static struct ieee80211_channel wl1271_channels[] = {
2092 { .hw_value = 1, .center_freq = 2412, .max_power = 25 }, 2251 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
2093 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
2094 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
2095 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
2096 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
2097 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
2098 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
2099 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
2100 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
2101 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
2102 { .hw_value = 2, .center_freq = 2417, .max_power = 25 }, 2252 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
2253 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
2254 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
2255 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
2103 { .hw_value = 6, .center_freq = 2437, .max_power = 25 }, 2256 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
2257 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
2258 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
2259 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
2104 { .hw_value = 10, .center_freq = 2457, .max_power = 25 }, 2260 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
2261 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
2262 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
2263 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
2105}; 2264};
2106 2265
2107/* mapping to indexes for wl1271_rates */ 2266/* mapping to indexes for wl1271_rates */
2108static const u8 wl1271_rate_to_idx_2ghz[] = { 2267static const u8 wl1271_rate_to_idx_2ghz[] = {
2109 /* MCS rates are used only with 11n */ 2268 /* MCS rates are used only with 11n */
2110 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */ 2269 7, /* CONF_HW_RXTX_RATE_MCS7 */
2111 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */ 2270 6, /* CONF_HW_RXTX_RATE_MCS6 */
2112 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */ 2271 5, /* CONF_HW_RXTX_RATE_MCS5 */
2113 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */ 2272 4, /* CONF_HW_RXTX_RATE_MCS4 */
2114 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */ 2273 3, /* CONF_HW_RXTX_RATE_MCS3 */
2115 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */ 2274 2, /* CONF_HW_RXTX_RATE_MCS2 */
2116 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */ 2275 1, /* CONF_HW_RXTX_RATE_MCS1 */
2117 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */ 2276 0, /* CONF_HW_RXTX_RATE_MCS0 */
2118 2277
2119 11, /* CONF_HW_RXTX_RATE_54 */ 2278 11, /* CONF_HW_RXTX_RATE_54 */
2120 10, /* CONF_HW_RXTX_RATE_48 */ 2279 10, /* CONF_HW_RXTX_RATE_48 */
@@ -2134,12 +2293,34 @@ static const u8 wl1271_rate_to_idx_2ghz[] = {
2134 0 /* CONF_HW_RXTX_RATE_1 */ 2293 0 /* CONF_HW_RXTX_RATE_1 */
2135}; 2294};
2136 2295
2296/* 11n STA capabilities */
2297#define HW_RX_HIGHEST_RATE 72
2298
2299#ifdef CONFIG_WL12XX_HT
2300#define WL12XX_HT_CAP { \
2301 .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20, \
2302 .ht_supported = true, \
2303 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \
2304 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
2305 .mcs = { \
2306 .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \
2307 .rx_highest = cpu_to_le16(HW_RX_HIGHEST_RATE), \
2308 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
2309 }, \
2310}
2311#else
2312#define WL12XX_HT_CAP { \
2313 .ht_supported = false, \
2314}
2315#endif
2316
2137/* can't be const, mac80211 writes to this */ 2317/* can't be const, mac80211 writes to this */
2138static struct ieee80211_supported_band wl1271_band_2ghz = { 2318static struct ieee80211_supported_band wl1271_band_2ghz = {
2139 .channels = wl1271_channels, 2319 .channels = wl1271_channels,
2140 .n_channels = ARRAY_SIZE(wl1271_channels), 2320 .n_channels = ARRAY_SIZE(wl1271_channels),
2141 .bitrates = wl1271_rates, 2321 .bitrates = wl1271_rates,
2142 .n_bitrates = ARRAY_SIZE(wl1271_rates), 2322 .n_bitrates = ARRAY_SIZE(wl1271_rates),
2323 .ht_cap = WL12XX_HT_CAP,
2143}; 2324};
2144 2325
2145/* 5 GHz data rates for WL1273 */ 2326/* 5 GHz data rates for WL1273 */
@@ -2170,66 +2351,63 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
2170 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, 2351 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
2171}; 2352};
2172 2353
2173/* 2354/* 5 GHz band channels for WL1273 */
2174 * 5 GHz band channels for WL1273 - can't be const, mac80211 writes to this.
2175 * The order of the channels here is designed to improve scanning.
2176 */
2177static struct ieee80211_channel wl1271_channels_5ghz[] = { 2355static struct ieee80211_channel wl1271_channels_5ghz[] = {
2178 { .hw_value = 183, .center_freq = 4915}, 2356 { .hw_value = 183, .center_freq = 4915},
2179 { .hw_value = 188, .center_freq = 4940},
2180 { .hw_value = 8, .center_freq = 5040},
2181 { .hw_value = 34, .center_freq = 5170},
2182 { .hw_value = 44, .center_freq = 5220},
2183 { .hw_value = 60, .center_freq = 5300},
2184 { .hw_value = 112, .center_freq = 5560},
2185 { .hw_value = 132, .center_freq = 5660},
2186 { .hw_value = 157, .center_freq = 5785},
2187 { .hw_value = 184, .center_freq = 4920}, 2357 { .hw_value = 184, .center_freq = 4920},
2358 { .hw_value = 185, .center_freq = 4925},
2359 { .hw_value = 187, .center_freq = 4935},
2360 { .hw_value = 188, .center_freq = 4940},
2188 { .hw_value = 189, .center_freq = 4945}, 2361 { .hw_value = 189, .center_freq = 4945},
2189 { .hw_value = 9, .center_freq = 5045},
2190 { .hw_value = 36, .center_freq = 5180},
2191 { .hw_value = 46, .center_freq = 5230},
2192 { .hw_value = 64, .center_freq = 5320},
2193 { .hw_value = 116, .center_freq = 5580},
2194 { .hw_value = 136, .center_freq = 5680},
2195 { .hw_value = 192, .center_freq = 4960}, 2362 { .hw_value = 192, .center_freq = 4960},
2196 { .hw_value = 11, .center_freq = 5055},
2197 { .hw_value = 38, .center_freq = 5190},
2198 { .hw_value = 48, .center_freq = 5240},
2199 { .hw_value = 100, .center_freq = 5500},
2200 { .hw_value = 120, .center_freq = 5600},
2201 { .hw_value = 140, .center_freq = 5700},
2202 { .hw_value = 185, .center_freq = 4925},
2203 { .hw_value = 196, .center_freq = 4980}, 2363 { .hw_value = 196, .center_freq = 4980},
2204 { .hw_value = 12, .center_freq = 5060},
2205 { .hw_value = 40, .center_freq = 5200},
2206 { .hw_value = 52, .center_freq = 5260},
2207 { .hw_value = 104, .center_freq = 5520},
2208 { .hw_value = 124, .center_freq = 5620},
2209 { .hw_value = 149, .center_freq = 5745},
2210 { .hw_value = 161, .center_freq = 5805},
2211 { .hw_value = 187, .center_freq = 4935},
2212 { .hw_value = 7, .center_freq = 5035}, 2364 { .hw_value = 7, .center_freq = 5035},
2365 { .hw_value = 8, .center_freq = 5040},
2366 { .hw_value = 9, .center_freq = 5045},
2367 { .hw_value = 11, .center_freq = 5055},
2368 { .hw_value = 12, .center_freq = 5060},
2213 { .hw_value = 16, .center_freq = 5080}, 2369 { .hw_value = 16, .center_freq = 5080},
2370 { .hw_value = 34, .center_freq = 5170},
2371 { .hw_value = 36, .center_freq = 5180},
2372 { .hw_value = 38, .center_freq = 5190},
2373 { .hw_value = 40, .center_freq = 5200},
2214 { .hw_value = 42, .center_freq = 5210}, 2374 { .hw_value = 42, .center_freq = 5210},
2375 { .hw_value = 44, .center_freq = 5220},
2376 { .hw_value = 46, .center_freq = 5230},
2377 { .hw_value = 48, .center_freq = 5240},
2378 { .hw_value = 52, .center_freq = 5260},
2215 { .hw_value = 56, .center_freq = 5280}, 2379 { .hw_value = 56, .center_freq = 5280},
2380 { .hw_value = 60, .center_freq = 5300},
2381 { .hw_value = 64, .center_freq = 5320},
2382 { .hw_value = 100, .center_freq = 5500},
2383 { .hw_value = 104, .center_freq = 5520},
2216 { .hw_value = 108, .center_freq = 5540}, 2384 { .hw_value = 108, .center_freq = 5540},
2385 { .hw_value = 112, .center_freq = 5560},
2386 { .hw_value = 116, .center_freq = 5580},
2387 { .hw_value = 120, .center_freq = 5600},
2388 { .hw_value = 124, .center_freq = 5620},
2217 { .hw_value = 128, .center_freq = 5640}, 2389 { .hw_value = 128, .center_freq = 5640},
2390 { .hw_value = 132, .center_freq = 5660},
2391 { .hw_value = 136, .center_freq = 5680},
2392 { .hw_value = 140, .center_freq = 5700},
2393 { .hw_value = 149, .center_freq = 5745},
2218 { .hw_value = 153, .center_freq = 5765}, 2394 { .hw_value = 153, .center_freq = 5765},
2395 { .hw_value = 157, .center_freq = 5785},
2396 { .hw_value = 161, .center_freq = 5805},
2219 { .hw_value = 165, .center_freq = 5825}, 2397 { .hw_value = 165, .center_freq = 5825},
2220}; 2398};
2221 2399
2222/* mapping to indexes for wl1271_rates_5ghz */ 2400/* mapping to indexes for wl1271_rates_5ghz */
2223static const u8 wl1271_rate_to_idx_5ghz[] = { 2401static const u8 wl1271_rate_to_idx_5ghz[] = {
2224 /* MCS rates are used only with 11n */ 2402 /* MCS rates are used only with 11n */
2225 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */ 2403 7, /* CONF_HW_RXTX_RATE_MCS7 */
2226 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */ 2404 6, /* CONF_HW_RXTX_RATE_MCS6 */
2227 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */ 2405 5, /* CONF_HW_RXTX_RATE_MCS5 */
2228 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */ 2406 4, /* CONF_HW_RXTX_RATE_MCS4 */
2229 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */ 2407 3, /* CONF_HW_RXTX_RATE_MCS3 */
2230 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */ 2408 2, /* CONF_HW_RXTX_RATE_MCS2 */
2231 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */ 2409 1, /* CONF_HW_RXTX_RATE_MCS1 */
2232 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */ 2410 0, /* CONF_HW_RXTX_RATE_MCS0 */
2233 2411
2234 7, /* CONF_HW_RXTX_RATE_54 */ 2412 7, /* CONF_HW_RXTX_RATE_54 */
2235 6, /* CONF_HW_RXTX_RATE_48 */ 2413 6, /* CONF_HW_RXTX_RATE_48 */
@@ -2254,6 +2432,7 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
2254 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz), 2432 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
2255 .bitrates = wl1271_rates_5ghz, 2433 .bitrates = wl1271_rates_5ghz,
2256 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), 2434 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
2435 .ht_cap = WL12XX_HT_CAP,
2257}; 2436};
2258 2437
2259static const u8 *wl1271_band_rate_to_idx[] = { 2438static const u8 *wl1271_band_rate_to_idx[] = {
@@ -2273,6 +2452,7 @@ static const struct ieee80211_ops wl1271_ops = {
2273 .set_key = wl1271_op_set_key, 2452 .set_key = wl1271_op_set_key,
2274 .hw_scan = wl1271_op_hw_scan, 2453 .hw_scan = wl1271_op_hw_scan,
2275 .bss_info_changed = wl1271_op_bss_info_changed, 2454 .bss_info_changed = wl1271_op_bss_info_changed,
2455 .set_frag_threshold = wl1271_op_set_frag_threshold,
2276 .set_rts_threshold = wl1271_op_set_rts_threshold, 2456 .set_rts_threshold = wl1271_op_set_rts_threshold,
2277 .conf_tx = wl1271_op_conf_tx, 2457 .conf_tx = wl1271_op_conf_tx,
2278 .get_tsf = wl1271_op_get_tsf, 2458 .get_tsf = wl1271_op_get_tsf,
@@ -2281,18 +2461,18 @@ static const struct ieee80211_ops wl1271_ops = {
2281}; 2461};
2282 2462
2283 2463
2284u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate) 2464u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band)
2285{ 2465{
2286 u8 idx; 2466 u8 idx;
2287 2467
2288 BUG_ON(wl->band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *)); 2468 BUG_ON(band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *));
2289 2469
2290 if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) { 2470 if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) {
2291 wl1271_error("Illegal RX rate from HW: %d", rate); 2471 wl1271_error("Illegal RX rate from HW: %d", rate);
2292 return 0; 2472 return 0;
2293 } 2473 }
2294 2474
2295 idx = wl1271_band_rate_to_idx[wl->band][rate]; 2475 idx = wl1271_band_rate_to_idx[band][rate];
2296 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) { 2476 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
2297 wl1271_error("Unsupported RX rate from HW: %d", rate); 2477 wl1271_error("Unsupported RX rate from HW: %d", rate);
2298 return 0; 2478 return 0;
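
The rewritten wl1271_rate_to_idx() drops the wl pointer and takes the band explicitly, so RX and TX completion paths can translate a firmware rate code for whichever band the frame actually arrived on. The lookup is a two-level table indexed first by band, then by rate code, with a sentinel for unsupported entries. A minimal userspace sketch of the same pattern follows; the table names and contents are illustrative, not the driver's real tables:

    #include <stdint.h>
    #include <stdio.h>

    #define RATE_UNSUPPORTED 0xff   /* stands in for CONF_HW_RXTX_RATE_UNSUPPORTED */

    /* toy per-band tables: firmware rate code -> mac80211 rate index */
    static const uint8_t idx_2ghz[] = { 11, 10, 9, 8 };
    static const uint8_t idx_5ghz[] = {  7,  6, RATE_UNSUPPORTED, 4 };
    static const uint8_t *band_rate_to_idx[] = { idx_2ghz, idx_5ghz };
    #define RATE_MAX ((int)(sizeof(idx_2ghz) / sizeof(idx_2ghz[0])))

    static uint8_t rate_to_idx(int rate, int band)   /* band is 0 or 1 here */
    {
            uint8_t idx;

            if (rate >= RATE_MAX)            /* bogus rate code from "hardware" */
                    return 0;
            idx = band_rate_to_idx[band][rate];
            if (idx == RATE_UNSUPPORTED)     /* e.g. an unsupported MCS code */
                    return 0;
            return idx;
    }

    int main(void)
    {
            printf("%d %d\n", rate_to_idx(1, 0), rate_to_idx(2, 1));  /* prints "10 0" */
            return 0;
    }
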
@@ -2457,6 +2637,8 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
2457 wl->hw->queues = 4; 2637 wl->hw->queues = 4;
2458 wl->hw->max_rates = 1; 2638 wl->hw->max_rates = 1;
2459 2639
2640 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
2641
2460 SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl)); 2642 SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
2461 2643
2462 return 0; 2644 return 0;
@@ -2521,6 +2703,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2521 wl->sg_enabled = true; 2703 wl->sg_enabled = true;
2522 wl->hw_pg_ver = -1; 2704 wl->hw_pg_ver = -1;
2523 2705
2706 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
2524 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 2707 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
2525 wl->tx_frames[i] = NULL; 2708 wl->tx_frames[i] = NULL;
2526 2709
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/ps.c
index e3c332e2f97..60a3738eadb 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -21,9 +21,9 @@
21 * 21 *
22 */ 22 */
23 23
24#include "wl1271_reg.h" 24#include "reg.h"
25#include "wl1271_ps.h" 25#include "ps.h"
26#include "wl1271_io.h" 26#include "io.h"
27 27
28#define WL1271_WAKEUP_TIMEOUT 500 28#define WL1271_WAKEUP_TIMEOUT 500
29 29
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/ps.h
index 6ba7b032736..8415060f08e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -21,11 +21,11 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_PS_H__ 24#ifndef __PS_H__
25#define __WL1271_PS_H__ 25#define __PS_H__
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28#include "wl1271_acx.h" 28#include "acx.h"
29 29
30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, 30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
31 u32 rates, bool send); 31 u32 rates, bool send);
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/reg.h
index 99096077152..99096077152 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/rx.c
index bea133b6e48..682304c30b8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -23,11 +23,11 @@
23 23
24#include <linux/gfp.h> 24#include <linux/gfp.h>
25 25
26#include "wl1271.h" 26#include "wl12xx.h"
27#include "wl1271_acx.h" 27#include "acx.h"
28#include "wl1271_reg.h" 28#include "reg.h"
29#include "wl1271_rx.h" 29#include "rx.h"
30#include "wl1271_io.h" 30#include "io.h"
31 31
32static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, 32static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
33 u32 drv_rx_counter) 33 u32 drv_rx_counter)
@@ -48,10 +48,24 @@ static void wl1271_rx_status(struct wl1271 *wl,
48 struct ieee80211_rx_status *status, 48 struct ieee80211_rx_status *status,
49 u8 beacon) 49 u8 beacon)
50{ 50{
51 enum ieee80211_band desc_band;
52
51 memset(status, 0, sizeof(struct ieee80211_rx_status)); 53 memset(status, 0, sizeof(struct ieee80211_rx_status));
52 54
53 status->band = wl->band; 55 status->band = wl->band;
54 status->rate_idx = wl1271_rate_to_idx(wl, desc->rate); 56
57 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
58 desc_band = IEEE80211_BAND_2GHZ;
59 else
60 desc_band = IEEE80211_BAND_5GHZ;
61
62 status->rate_idx = wl1271_rate_to_idx(desc->rate, desc_band);
63
64#ifdef CONFIG_WL12XX_HT
65 /* 11n support */
66 if (desc->rate <= CONF_HW_RXTX_RATE_MCS0)
67 status->flag |= RX_FLAG_HT;
68#endif
55 69
56 status->signal = desc->rssi; 70 status->signal = desc->rssi;
57 71
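
Two details in the new RX-status code are easy to miss: the band used for rate translation comes from the descriptor's flag field rather than from wl->band, and the HT test works because the firmware's MCS codes sit at the bottom of the rate enumeration, MCS7 down to MCS0, as the wl1271_rate_to_idx_5ghz table above suggests, so `rate <= CONF_HW_RXTX_RATE_MCS0` covers every MCS code. A rough sketch of both checks; the enum values and flag masks below are placeholders, not the driver's real constants:

    #include <stdio.h>

    /* Placeholder values: the real CONF_HW_RXTX_RATE_* codes and the
     * WL1271_RX_DESC_BAND_* masks are defined elsewhere in the driver. */
    enum { RATE_MCS7, RATE_MCS6, RATE_MCS5, RATE_MCS4,
           RATE_MCS3, RATE_MCS2, RATE_MCS1, RATE_MCS0,
           RATE_54, RATE_48 /* ...legacy rates continue upward... */ };

    #define DESC_BAND_MASK 0x03
    #define DESC_BAND_BG   0x00

    static int rate_is_mcs(int rate)
    {
            return rate <= RATE_MCS0;  /* every MCS code sorts below the legacy ones */
    }

    static int band_is_2ghz(unsigned flags)
    {
            return (flags & DESC_BAND_MASK) == DESC_BAND_BG;
    }

    int main(void)
    {
            printf("%d %d %d\n", rate_is_mcs(RATE_MCS2), rate_is_mcs(RATE_54),
                   band_is_2ghz(0x01));   /* prints "1 0 0" */
            return 0;
    }
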
@@ -170,10 +184,14 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
170 while (pkt_offset < buf_size) { 184 while (pkt_offset < buf_size) {
171 pkt_length = wl1271_rx_get_buf_size(status, 185 pkt_length = wl1271_rx_get_buf_size(status,
172 drv_rx_counter); 186 drv_rx_counter);
173 if (wl1271_rx_handle_data(wl, 187 /*
174 wl->aggr_buf + pkt_offset, 188 * the handle data call can only fail in memory-outage
175 pkt_length) < 0) 189 * conditions, in that case the received frame will just
176 break; 190 * be dropped.
191 */
192 wl1271_rx_handle_data(wl,
193 wl->aggr_buf + pkt_offset,
194 pkt_length);
177 wl->rx_counter++; 195 wl->rx_counter++;
178 drv_rx_counter++; 196 drv_rx_counter++;
179 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; 197 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/rx.h
index 13a232333b1..3abb26fe036 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_RX_H__ 25#ifndef __RX_H__
26#define __WL1271_RX_H__ 26#define __RX_H__
27 27
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29 29
@@ -116,6 +116,6 @@ struct wl1271_rx_descriptor {
116} __packed; 116} __packed;
117 117
118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); 118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
119u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate); 119u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
120 120
121#endif 121#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.c b/drivers/net/wireless/wl12xx/scan.c
index 909bb47995b..f3f2c5b011e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -23,10 +23,10 @@
23 23
24#include <linux/ieee80211.h> 24#include <linux/ieee80211.h>
25 25
26#include "wl1271.h" 26#include "wl12xx.h"
27#include "wl1271_cmd.h" 27#include "cmd.h"
28#include "wl1271_scan.h" 28#include "scan.h"
29#include "wl1271_acx.h" 29#include "acx.h"
30 30
31void wl1271_scan_complete_work(struct work_struct *work) 31void wl1271_scan_complete_work(struct work_struct *work)
32{ 32{
@@ -48,14 +48,15 @@ void wl1271_scan_complete_work(struct work_struct *work)
48 wl->scan.state = WL1271_SCAN_STATE_IDLE; 48 wl->scan.state = WL1271_SCAN_STATE_IDLE;
49 kfree(wl->scan.scanned_ch); 49 kfree(wl->scan.scanned_ch);
50 wl->scan.scanned_ch = NULL; 50 wl->scan.scanned_ch = NULL;
51 mutex_unlock(&wl->mutex); 51 wl->scan.req = NULL;
52
53 ieee80211_scan_completed(wl->hw, false); 52 ieee80211_scan_completed(wl->hw, false);
54 53
55 if (wl->scan.failed) { 54 if (wl->scan.failed) {
56 wl1271_info("Scan completed due to error."); 55 wl1271_info("Scan completed due to error.");
57 ieee80211_queue_work(wl->hw, &wl->recovery_work); 56 ieee80211_queue_work(wl->hw, &wl->recovery_work);
58 } 57 }
58 mutex_unlock(&wl->mutex);
59
59} 60}
60 61
61 62
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.h b/drivers/net/wireless/wl12xx/scan.h
index 6d57127b5e6..421a750add5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -21,10 +21,10 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_SCAN_H__ 24#ifndef __SCAN_H__
25#define __WL1271_SCAN_H__ 25#define __SCAN_H__
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28 28
29int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, 29int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
30 struct cfg80211_scan_request *req); 30 struct cfg80211_scan_request *req);
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 784ef343264..93cbb8d5aba 100644
--- a/drivers/net/wireless/wl12xx/wl1271_sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -32,9 +32,9 @@
32#include <linux/wl12xx.h> 32#include <linux/wl12xx.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34 34
35#include "wl1271.h" 35#include "wl12xx.h"
36#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
37#include "wl1271_io.h" 37#include "io.h"
38 38
39#ifndef SDIO_VENDOR_ID_TI 39#ifndef SDIO_VENDOR_ID_TI
40#define SDIO_VENDOR_ID_TI 0x0097 40#define SDIO_VENDOR_ID_TI 0x0097
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/spi.c
index ef801680773..46714910f98 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -28,11 +28,11 @@
28#include <linux/wl12xx.h> 28#include <linux/wl12xx.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "wl1271.h" 31#include "wl12xx.h"
32#include "wl12xx_80211.h" 32#include "wl12xx_80211.h"
33#include "wl1271_io.h" 33#include "io.h"
34 34
35#include "wl1271_reg.h" 35#include "reg.h"
36 36
37#define WSPI_CMD_READ 0x40000000 37#define WSPI_CMD_READ 0x40000000
38#define WSPI_CMD_WRITE 0x00000000 38#define WSPI_CMD_WRITE 0x00000000
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index a3aa84386c8..e64403b6896 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -20,13 +20,13 @@
20 * 02110-1301 USA 20 * 02110-1301 USA
21 * 21 *
22 */ 22 */
23#include "wl1271_testmode.h" 23#include "testmode.h"
24 24
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <net/genetlink.h> 26#include <net/genetlink.h>
27 27
28#include "wl1271.h" 28#include "wl12xx.h"
29#include "wl1271_acx.h" 29#include "acx.h"
30 30
31#define WL1271_TM_MAX_DATA_LENGTH 1024 31#define WL1271_TM_MAX_DATA_LENGTH 1024
32 32
@@ -37,6 +37,7 @@ enum wl1271_tm_commands {
37 WL1271_TM_CMD_CONFIGURE, 37 WL1271_TM_CMD_CONFIGURE,
38 WL1271_TM_CMD_NVS_PUSH, 38 WL1271_TM_CMD_NVS_PUSH,
39 WL1271_TM_CMD_SET_PLT_MODE, 39 WL1271_TM_CMD_SET_PLT_MODE,
40 WL1271_TM_CMD_RECOVER,
40 41
41 __WL1271_TM_CMD_AFTER_LAST 42 __WL1271_TM_CMD_AFTER_LAST
42}; 43};
@@ -248,6 +249,15 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
248 return ret; 249 return ret;
249} 250}
250 251
252static int wl1271_tm_cmd_recover(struct wl1271 *wl, struct nlattr *tb[])
253{
254 wl1271_debug(DEBUG_TESTMODE, "testmode cmd recover");
255
256 ieee80211_queue_work(wl->hw, &wl->recovery_work);
257
258 return 0;
259}
260
251int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len) 261int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
252{ 262{
253 struct wl1271 *wl = hw->priv; 263 struct wl1271 *wl = hw->priv;
@@ -272,6 +282,8 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
272 return wl1271_tm_cmd_nvs_push(wl, tb); 282 return wl1271_tm_cmd_nvs_push(wl, tb);
273 case WL1271_TM_CMD_SET_PLT_MODE: 283 case WL1271_TM_CMD_SET_PLT_MODE:
274 return wl1271_tm_cmd_set_plt_mode(wl, tb); 284 return wl1271_tm_cmd_set_plt_mode(wl, tb);
285 case WL1271_TM_CMD_RECOVER:
286 return wl1271_tm_cmd_recover(wl, tb);
275 default: 287 default:
276 return -EOPNOTSUPP; 288 return -EOPNOTSUPP;
277 } 289 }
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.h b/drivers/net/wireless/wl12xx/testmode.h
index c196d28f9d9..8071654259e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.h
+++ b/drivers/net/wireless/wl12xx/testmode.h
@@ -21,8 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_TESTMODE_H__ 24#ifndef __TESTMODE_H__
25#define __WL1271_TESTMODE_H__ 25#define __TESTMODE_H__
26 26
27#include <net/mac80211.h> 27#include <net/mac80211.h>
28 28
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/tx.c
index e3dc13c4d01..d332b3f6d0f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -24,23 +24,32 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h> 25#include <linux/module.h>
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28#include "wl1271_io.h" 28#include "io.h"
29#include "wl1271_reg.h" 29#include "reg.h"
30#include "wl1271_ps.h" 30#include "ps.h"
31#include "wl1271_tx.h" 31#include "tx.h"
32 32
33static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb) 33static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
34{ 34{
35 int i; 35 int id;
36 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 36
37 if (wl->tx_frames[i] == NULL) { 37 id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS);
38 wl->tx_frames[i] = skb; 38 if (id >= ACX_TX_DESCRIPTORS)
39 wl->tx_frames_cnt++; 39 return -EBUSY;
40 return i; 40
41 } 41 __set_bit(id, wl->tx_frames_map);
42 wl->tx_frames[id] = skb;
43 wl->tx_frames_cnt++;
44 return id;
45}
42 46
43 return -EBUSY; 47static void wl1271_free_tx_id(struct wl1271 *wl, int id)
48{
49 if (__test_and_clear_bit(id, wl->tx_frames_map)) {
50 wl->tx_frames[id] = NULL;
51 wl->tx_frames_cnt--;
52 }
44} 53}
45 54
46static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, 55static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
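
The open-coded linear scan for a free TX descriptor slot is replaced by a bitmap: find_first_zero_bit() picks an id, __set_bit()/__test_and_clear_bit() mark it busy or free, and tx_frames[]/tx_frames_cnt are kept in step through the two helpers. The same allocator shape in plain userspace C, using a single 32-bit word as the bitmap (the driver sizes it as BITS_TO_LONGS(ACX_TX_DESCRIPTORS) longs; names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_DESCRIPTORS 32

    static uint32_t frames_map;              /* one bit per descriptor slot */
    static void *frames[NUM_DESCRIPTORS];
    static int frames_cnt;

    static int alloc_tx_id(void *skb)
    {
            int id;

            for (id = 0; id < NUM_DESCRIPTORS; id++)   /* find_first_zero_bit() */
                    if (!(frames_map & (1u << id)))
                            break;
            if (id >= NUM_DESCRIPTORS)
                    return -1;                         /* -EBUSY in the driver */

            frames_map |= 1u << id;                    /* __set_bit() */
            frames[id] = skb;
            frames_cnt++;
            return id;
    }

    static void free_tx_id(int id)
    {
            if (frames_map & (1u << id)) {             /* __test_and_clear_bit() */
                    frames_map &= ~(1u << id);
                    frames[id] = NULL;
                    frames_cnt--;
            }
    }

    int main(void)
    {
            int a = alloc_tx_id((void *)0x1), b = alloc_tx_id((void *)0x2);
            free_tx_id(a);
            printf("%d %d %d\n", a, b, alloc_tx_id((void *)0x3)); /* 0 1 0: slot 0 reused */
            return 0;
    }

Routing every release through wl1271_free_tx_id() is also what lets wl1271_tx_reset() further down drop its manual `wl->tx_frames_cnt = 0`, since the count now always follows the bitmap.
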
@@ -52,10 +61,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
52 int id, ret = -EBUSY; 61 int id, ret = -EBUSY;
53 62
54 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) 63 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
55 return -EBUSY; 64 return -EAGAIN;
56 65
57 /* allocate free identifier for the packet */ 66 /* allocate free identifier for the packet */
58 id = wl1271_tx_id(wl, skb); 67 id = wl1271_alloc_tx_id(wl, skb);
59 if (id < 0) 68 if (id < 0)
60 return id; 69 return id;
61 70
@@ -79,8 +88,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
79 "tx_allocate: size: %d, blocks: %d, id: %d", 88 "tx_allocate: size: %d, blocks: %d, id: %d",
80 total_len, total_blocks, id); 89 total_len, total_blocks, id);
81 } else { 90 } else {
82 wl->tx_frames[id] = NULL; 91 wl1271_free_tx_id(wl, id);
83 wl->tx_frames_cnt--;
84 } 92 }
85 93
86 return ret; 94 return ret;
@@ -201,41 +209,67 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
201 rate_set >>= 1; 209 rate_set >>= 1;
202 } 210 }
203 211
212#ifdef CONFIG_WL12XX_HT
213 /* MCS rates indication are on bits 16 - 23 */
214 rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
215
216 for (bit = 0; bit < 8; bit++) {
217 if (rate_set & 0x1)
218 enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
219 rate_set >>= 1;
220 }
221#endif
222
204 return enabled_rates; 223 return enabled_rates;
205} 224}
206 225
207void wl1271_tx_work(struct work_struct *work) 226static void handle_tx_low_watermark(struct wl1271 *wl)
227{
228 unsigned long flags;
229
230 if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
231 skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
232 /* firmware buffer has space, restart queues */
233 spin_lock_irqsave(&wl->wl_lock, flags);
234 ieee80211_wake_queues(wl->hw);
235 clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
236 spin_unlock_irqrestore(&wl->wl_lock, flags);
237 }
238}
239
240void wl1271_tx_work_locked(struct wl1271 *wl)
208{ 241{
209 struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
210 struct sk_buff *skb; 242 struct sk_buff *skb;
211 bool woken_up = false; 243 bool woken_up = false;
212 u32 sta_rates = 0; 244 u32 sta_rates = 0;
213 u32 buf_offset; 245 u32 buf_offset = 0;
246 bool sent_packets = false;
214 int ret; 247 int ret;
215 248
216 /* check if the rates supported by the AP have changed */ 249 /* check if the rates supported by the AP have changed */
217 if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED, 250 if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
218 &wl->flags))) { 251 &wl->flags))) {
219 unsigned long flags; 252 unsigned long flags;
253
220 spin_lock_irqsave(&wl->wl_lock, flags); 254 spin_lock_irqsave(&wl->wl_lock, flags);
221 sta_rates = wl->sta_rate_set; 255 sta_rates = wl->sta_rate_set;
222 spin_unlock_irqrestore(&wl->wl_lock, flags); 256 spin_unlock_irqrestore(&wl->wl_lock, flags);
223 } 257 }
224 258
225 mutex_lock(&wl->mutex);
226
227 if (unlikely(wl->state == WL1271_STATE_OFF)) 259 if (unlikely(wl->state == WL1271_STATE_OFF))
228 goto out; 260 goto out;
229 261
230 /* if rates have changed, re-configure the rate policy */ 262 /* if rates have changed, re-configure the rate policy */
231 if (unlikely(sta_rates)) { 263 if (unlikely(sta_rates)) {
264 ret = wl1271_ps_elp_wakeup(wl, false);
265 if (ret < 0)
266 goto out;
267 woken_up = true;
268
232 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates); 269 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
233 wl1271_acx_rate_policies(wl); 270 wl1271_acx_rate_policies(wl);
234 } 271 }
235 272
236 /* Prepare the transfer buffer, by aggregating all
237 * available packets */
238 buf_offset = 0;
239 while ((skb = skb_dequeue(&wl->tx_queue))) { 273 while ((skb = skb_dequeue(&wl->tx_queue))) {
240 if (!woken_up) { 274 if (!woken_up) {
241 ret = wl1271_ps_elp_wakeup(wl, false); 275 ret = wl1271_ps_elp_wakeup(wl, false);
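
handle_tx_low_watermark() factors out the queue-restart test that used to sit at the bottom of wl1271_tx_complete(): once the TX queue has drained to WL1271_TX_QUEUE_LOW_WATERMARK and the stopped flag is set, mac80211's queues are woken under the spinlock. The stop side (setting WL1271_FLAG_TX_QUEUE_STOPPED when the backlog grows past a high watermark) lives in the driver's TX entry point, which this hunk does not show. A small self-contained demo of that hysteresis, with made-up thresholds:

    #include <stdio.h>
    #include <stdbool.h>

    #define HIGH_WATERMARK 10   /* stop accepting when the backlog reaches this */
    #define LOW_WATERMARK   3   /* restart once it has drained down to this */

    static int backlog;
    static bool stopped;

    static void enqueue(void)
    {
            backlog++;
            if (!stopped && backlog >= HIGH_WATERMARK) {
                    stopped = true;                    /* ieee80211_stop_queues() */
                    printf("stopped at backlog %d\n", backlog);
            }
    }

    static void complete_one(void)
    {
            if (backlog)
                    backlog--;
            if (stopped && backlog <= LOW_WATERMARK) {
                    stopped = false;                   /* ieee80211_wake_queues() */
                    printf("restarted at backlog %d\n", backlog);
            }
    }

    int main(void)
    {
            for (int i = 0; i < 12; i++)
                    enqueue();
            while (backlog)
                    complete_one();
            return 0;
    }
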
@@ -245,13 +279,25 @@ void wl1271_tx_work(struct work_struct *work)
245 } 279 }
246 280
247 ret = wl1271_prepare_tx_frame(wl, skb, buf_offset); 281 ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
248 if (ret == -EBUSY) { 282 if (ret == -EAGAIN) {
283 /*
284 * Aggregation buffer is full.
285 * Flush buffer and try again.
286 */
287 skb_queue_head(&wl->tx_queue, skb);
288 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
289 buf_offset, true);
290 sent_packets = true;
291 buf_offset = 0;
292 continue;
293 } else if (ret == -EBUSY) {
249 /* 294 /*
250 * Either the firmware buffer is full, or the 295 * Firmware buffer is full.
251 * aggregation buffer is.
252 * Queue back last skb, and stop aggregating. 296 * Queue back last skb, and stop aggregating.
253 */ 297 */
254 skb_queue_head(&wl->tx_queue, skb); 298 skb_queue_head(&wl->tx_queue, skb);
299 /* No work left, avoid scheduling redundant tx work */
300 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
255 goto out_ack; 301 goto out_ack;
256 } else if (ret < 0) { 302 } else if (ret < 0) {
257 dev_kfree_skb(skb); 303 dev_kfree_skb(skb);
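
The error handling in the aggregation loop now distinguishes two "full" conditions: -EAGAIN means the driver's own aggregation buffer ran out, so the pending frame is requeued, the buffer is flushed to the device and the loop continues; -EBUSY means the firmware has no room, so the frame is requeued, WL1271_FLAG_FW_TX_BUSY is set and aggregation stops until a TX-complete interrupt. A compact userspace sketch of that batching shape; the buffer sizes, frame contents and the flush step are stand-ins, not the driver's code:

    #include <stdio.h>
    #include <string.h>

    #define AGGR_BUF_SIZE 16

    static char aggr_buf[AGGR_BUF_SIZE];
    static int buf_offset;
    static int fw_slots = 3;    /* pretend firmware descriptor slots */

    /* 0 on success, -1 if the aggregation buffer is full ("-EAGAIN"),
     * -2 if the "firmware" is full ("-EBUSY") */
    static int prepare_frame(const char *frame, int len)
    {
            if (buf_offset + len > AGGR_BUF_SIZE)
                    return -1;
            if (fw_slots == 0)
                    return -2;
            memcpy(aggr_buf + buf_offset, frame, len);
            buf_offset += len;
            fw_slots--;
            return 0;
    }

    static void flush(void)
    {
            if (buf_offset)
                    printf("flushing %d bytes to the device\n", buf_offset);
            buf_offset = 0;
    }

    int main(void)
    {
            const char *frames[] = { "0123456", "789abcd", "efghijk",
                                     "lmnopqr", "stuvwxy" };
            for (int i = 0; i < 5; i++) {
                    int ret = prepare_frame(frames[i], 7);
                    if (ret == -1) {     /* aggregation buffer full: flush, retry */
                            flush();
                            i--;
                            continue;
                    }
                    if (ret == -2) {     /* firmware full: stop, wait for completions */
                            printf("firmware busy, stopping after %d frames\n", i);
                            break;
                    }
            }
            flush();
            return 0;
    }
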
@@ -265,14 +311,25 @@ out_ack:
265 if (buf_offset) { 311 if (buf_offset) {
266 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, 312 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
267 buf_offset, true); 313 buf_offset, true);
314 sent_packets = true;
315 }
316 if (sent_packets) {
268 /* interrupt the firmware with the new packets */ 317 /* interrupt the firmware with the new packets */
269 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); 318 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
319 handle_tx_low_watermark(wl);
270 } 320 }
271 321
272out: 322out:
273 if (woken_up) 323 if (woken_up)
274 wl1271_ps_elp_sleep(wl); 324 wl1271_ps_elp_sleep(wl);
325}
326
327void wl1271_tx_work(struct work_struct *work)
328{
329 struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
275 330
331 mutex_lock(&wl->mutex);
332 wl1271_tx_work_locked(wl);
276 mutex_unlock(&wl->mutex); 333 mutex_unlock(&wl->mutex);
277} 334}
278 335
@@ -298,7 +355,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
298 if (result->status == TX_SUCCESS) { 355 if (result->status == TX_SUCCESS) {
299 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 356 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
300 info->flags |= IEEE80211_TX_STAT_ACK; 357 info->flags |= IEEE80211_TX_STAT_ACK;
301 rate = wl1271_rate_to_idx(wl, result->rate_class_index); 358 rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
302 retries = result->ack_failures; 359 retries = result->ack_failures;
303 } else if (result->status == TX_RETRY_EXCEEDED) { 360 } else if (result->status == TX_RETRY_EXCEEDED) {
304 wl->stats.excessive_retries++; 361 wl->stats.excessive_retries++;
@@ -335,8 +392,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
335 392
336 /* return the packet to the stack */ 393 /* return the packet to the stack */
337 ieee80211_tx_status(wl->hw, skb); 394 ieee80211_tx_status(wl->hw, skb);
338 wl->tx_frames[result->id] = NULL; 395 wl1271_free_tx_id(wl, result->id);
339 wl->tx_frames_cnt--;
340} 396}
341 397
342/* Called upon reception of a TX complete interrupt */ 398/* Called upon reception of a TX complete interrupt */
@@ -375,19 +431,6 @@ void wl1271_tx_complete(struct wl1271 *wl)
375 431
376 wl->tx_results_count++; 432 wl->tx_results_count++;
377 } 433 }
378
379 if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
380 skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
381 unsigned long flags;
382
383 /* firmware buffer has space, restart queues */
384 wl1271_debug(DEBUG_TX, "tx_complete: waking queues");
385 spin_lock_irqsave(&wl->wl_lock, flags);
386 ieee80211_wake_queues(wl->hw);
387 clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
388 spin_unlock_irqrestore(&wl->wl_lock, flags);
389 ieee80211_queue_work(wl->hw, &wl->tx_work);
390 }
391} 434}
392 435
393/* caller must hold wl->mutex */ 436/* caller must hold wl->mutex */
@@ -402,14 +445,19 @@ void wl1271_tx_reset(struct wl1271 *wl)
402 ieee80211_tx_status(wl->hw, skb); 445 ieee80211_tx_status(wl->hw, skb);
403 } 446 }
404 447
448 /*
449 * Make sure the driver is at a consistent state, in case this
450 * function is called from a context other than interface removal.
451 */
452 handle_tx_low_watermark(wl);
453
405 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 454 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
406 if (wl->tx_frames[i] != NULL) { 455 if (wl->tx_frames[i] != NULL) {
407 skb = wl->tx_frames[i]; 456 skb = wl->tx_frames[i];
408 wl->tx_frames[i] = NULL; 457 wl1271_free_tx_id(wl, i);
409 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 458 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
410 ieee80211_tx_status(wl->hw, skb); 459 ieee80211_tx_status(wl->hw, skb);
411 } 460 }
412 wl->tx_frames_cnt = 0;
413} 461}
414 462
415#define WL1271_TX_FLUSH_TIMEOUT 500000 463#define WL1271_TX_FLUSH_TIMEOUT 500000
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/tx.h
index d12a129ad11..903e5dc69b7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_TX_H__ 25#ifndef __TX_H__
26#define __WL1271_TX_H__ 26#define __TX_H__
27 27
28#define TX_HW_BLOCK_SPARE 2 28#define TX_HW_BLOCK_SPARE 2
29#define TX_HW_BLOCK_SIZE 252 29#define TX_HW_BLOCK_SIZE 252
@@ -140,10 +140,11 @@ static inline int wl1271_tx_get_queue(int queue)
140} 140}
141 141
142void wl1271_tx_work(struct work_struct *work); 142void wl1271_tx_work(struct work_struct *work);
143void wl1271_tx_work_locked(struct wl1271 *wl);
143void wl1271_tx_complete(struct wl1271 *wl); 144void wl1271_tx_complete(struct wl1271 *wl);
144void wl1271_tx_reset(struct wl1271 *wl); 145void wl1271_tx_reset(struct wl1271 *wl);
145void wl1271_tx_flush(struct wl1271 *wl); 146void wl1271_tx_flush(struct wl1271 *wl);
146u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate); 147u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
147u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set); 148u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
148 149
149#endif 150#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 8a4cd763e5a..3c836e6063e 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_H__ 25#ifndef __WL12XX_H__
26#define __WL1271_H__ 26#define __WL12XX_H__
27 27
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/completion.h> 29#include <linux/completion.h>
@@ -32,8 +32,8 @@
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34 34
35#include "wl1271_conf.h" 35#include "conf.h"
36#include "wl1271_ini.h" 36#include "ini.h"
37 37
38#define DRIVER_NAME "wl1271" 38#define DRIVER_NAME "wl1271"
39#define DRIVER_PREFIX DRIVER_NAME ": " 39#define DRIVER_PREFIX DRIVER_NAME ": "
@@ -351,6 +351,7 @@ struct wl1271 {
351#define WL1271_FLAG_IDLE_REQUESTED (11) 351#define WL1271_FLAG_IDLE_REQUESTED (11)
352#define WL1271_FLAG_PSPOLL_FAILURE (12) 352#define WL1271_FLAG_PSPOLL_FAILURE (12)
353#define WL1271_FLAG_STA_STATE_SENT (13) 353#define WL1271_FLAG_STA_STATE_SENT (13)
354#define WL1271_FLAG_FW_TX_BUSY (14)
354 unsigned long flags; 355 unsigned long flags;
355 356
356 struct wl1271_partition_set part; 357 struct wl1271_partition_set part;
@@ -397,6 +398,7 @@ struct wl1271 {
397 struct work_struct tx_work; 398 struct work_struct tx_work;
398 399
399 /* Pending TX frames */ 400 /* Pending TX frames */
401 unsigned long tx_frames_map[BITS_TO_LONGS(ACX_TX_DESCRIPTORS)];
400 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS]; 402 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
401 int tx_frames_cnt; 403 int tx_frames_cnt;
402 404
@@ -432,7 +434,12 @@ struct wl1271 {
432 /* Our association ID */ 434 /* Our association ID */
433 u16 aid; 435 u16 aid;
434 436
435 /* currently configured rate set */ 437 /*
438 * currently configured rate set:
439 * bits 0-15 - 802.11abg rates
440 * bits 16-23 - 802.11n MCS index mask
441 * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
442 */
436 u32 sta_rate_set; 443 u32 sta_rate_set;
437 u32 basic_rate_set; 444 u32 basic_rate_set;
438 u32 basic_rate; 445 u32 basic_rate;
@@ -509,4 +516,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
509#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */ 516#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
510#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */ 517#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
511 518
519/* Macros to handle wl1271.sta_rate_set */
520#define HW_BG_RATES_MASK 0xffff
521#define HW_HT_RATES_OFFSET 16
522
512#endif 523#endif
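
The widened sta_rate_set now carries two fields in one u32: bits 0-15 hold the legacy 802.11abg rate bits and bits 16-23 hold a single-stream MCS0-7 mask, which is why wl12xx.h grows HW_BG_RATES_MASK and HW_HT_RATES_OFFSET and why wl1271_tx_enabled_rates_get() shifts by HW_HT_RATES_OFFSET before walking the MCS bits. A small demonstration of packing and unpacking that layout:

    #include <stdint.h>
    #include <stdio.h>

    #define HW_BG_RATES_MASK   0xffff
    #define HW_HT_RATES_OFFSET 16

    static uint32_t pack_rate_set(uint16_t legacy_bits, uint8_t mcs_bits)
    {
            return (legacy_bits & HW_BG_RATES_MASK) |
                   ((uint32_t)mcs_bits << HW_HT_RATES_OFFSET);
    }

    int main(void)
    {
            /* e.g. some legacy rates plus MCS0-MCS3 (one spatial stream only) */
            uint32_t set = pack_rate_set(0x0ff0, 0x0f);

            uint16_t legacy = set & HW_BG_RATES_MASK;
            uint8_t  mcs    = (set >> HW_HT_RATES_OFFSET) & 0xff;

            printf("set=0x%08x legacy=0x%04x mcs=0x%02x\n",
                   (unsigned)set, (unsigned)legacy, (unsigned)mcs);
            for (int bit = 0; bit < 8; bit++)    /* mirrors the loop added in tx.c */
                    if (mcs & (1u << bit))
                            printf("MCS%d enabled\n", bit);
            return 0;
    }
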
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 390d77f762c..b97aa9c78a9 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -30,6 +30,7 @@ static struct usb_device_id zd1201_table[] = {
30 {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */ 30 {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */
31 {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */ 31 {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */
32 {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */ 32 {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */
33 {USB_DEVICE(0x1044, 0x8004)}, /* Gigabyte GN-WLBZ101 */
33 {USB_DEVICE(0x1044, 0x8005)}, /* GIGABYTE GN-WLBZ201 usb adapter */ 34 {USB_DEVICE(0x1044, 0x8005)}, /* GIGABYTE GN-WLBZ201 usb adapter */
34 {} 35 {}
35}; 36};
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 87a95bcfee5..6a9b66051cf 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -117,6 +117,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
117 117
118 /* Allocate a single memory block for values and addresses. */ 118 /* Allocate a single memory block for values and addresses. */
119 count16 = 2*count; 119 count16 = 2*count;
120 /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */
120 a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)), 121 a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
121 GFP_KERNEL); 122 GFP_KERNEL);
122 if (!a16) { 123 if (!a16) {
@@ -1448,7 +1449,7 @@ int zd_rfwritev_locked(struct zd_chip *chip,
1448 */ 1449 */
1449int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value) 1450int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value)
1450{ 1451{
1451 struct zd_ioreq16 ioreqs[] = { 1452 const struct zd_ioreq16 ioreqs[] = {
1452 { CR244, (value >> 16) & 0xff }, 1453 { CR244, (value >> 16) & 0xff },
1453 { CR243, (value >> 8) & 0xff }, 1454 { CR243, (value >> 8) & 0xff },
1454 { CR242, value & 0xff }, 1455 { CR242, value & 0xff },
@@ -1475,7 +1476,7 @@ int zd_rfwritev_cr_locked(struct zd_chip *chip,
1475int zd_chip_set_multicast_hash(struct zd_chip *chip, 1476int zd_chip_set_multicast_hash(struct zd_chip *chip,
1476 struct zd_mc_hash *hash) 1477 struct zd_mc_hash *hash)
1477{ 1478{
1478 struct zd_ioreq32 ioreqs[] = { 1479 const struct zd_ioreq32 ioreqs[] = {
1479 { CR_GROUP_HASH_P1, hash->low }, 1480 { CR_GROUP_HASH_P1, hash->low },
1480 { CR_GROUP_HASH_P2, hash->high }, 1481 { CR_GROUP_HASH_P2, hash->high },
1481 }; 1482 };
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 818e1480ca9..06041cb1c42 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -55,6 +55,7 @@ static struct usb_device_id usb_ids[] = {
55 { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 }, 55 { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 },
56 { USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 }, 56 { USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 },
57 { USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 }, 57 { USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 },
58 { USB_DEVICE(0x14ea, 0xab10), .driver_info = DEVICE_ZD1211 },
58 { USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 }, 59 { USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 },
59 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, 60 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
60 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, 61 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
@@ -92,6 +93,7 @@ static struct usb_device_id usb_ids[] = {
92 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, 93 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
93 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, 94 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
94 { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B }, 95 { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B },
96 { USB_DEVICE(0x2019, 0xed01), .driver_info = DEVICE_ZD1211B },
95 /* "Driverless" devices that need ejecting */ 97 /* "Driverless" devices that need ejecting */
96 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 98 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
97 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, 99 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 14f0955eca6..de6c3086d23 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -515,7 +515,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
515 */ 515 */
516static int xemaclite_set_mac_address(struct net_device *dev, void *address) 516static int xemaclite_set_mac_address(struct net_device *dev, void *address)
517{ 517{
518 struct net_local *lp = (struct net_local *) netdev_priv(dev); 518 struct net_local *lp = netdev_priv(dev);
519 struct sockaddr *addr = address; 519 struct sockaddr *addr = address;
520 520
521 if (netif_running(dev)) 521 if (netif_running(dev))
@@ -534,7 +534,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
534 */ 534 */
535static void xemaclite_tx_timeout(struct net_device *dev) 535static void xemaclite_tx_timeout(struct net_device *dev)
536{ 536{
537 struct net_local *lp = (struct net_local *) netdev_priv(dev); 537 struct net_local *lp = netdev_priv(dev);
538 unsigned long flags; 538 unsigned long flags;
539 539
540 dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n", 540 dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
@@ -578,7 +578,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
578 */ 578 */
579static void xemaclite_tx_handler(struct net_device *dev) 579static void xemaclite_tx_handler(struct net_device *dev)
580{ 580{
581 struct net_local *lp = (struct net_local *) netdev_priv(dev); 581 struct net_local *lp = netdev_priv(dev);
582 582
583 dev->stats.tx_packets++; 583 dev->stats.tx_packets++;
584 if (lp->deferred_skb) { 584 if (lp->deferred_skb) {
@@ -605,7 +605,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
605 */ 605 */
606static void xemaclite_rx_handler(struct net_device *dev) 606static void xemaclite_rx_handler(struct net_device *dev)
607{ 607{
608 struct net_local *lp = (struct net_local *) netdev_priv(dev); 608 struct net_local *lp = netdev_priv(dev);
609 struct sk_buff *skb; 609 struct sk_buff *skb;
610 unsigned int align; 610 unsigned int align;
611 u32 len; 611 u32 len;
@@ -661,7 +661,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
661{ 661{
662 bool tx_complete = 0; 662 bool tx_complete = 0;
663 struct net_device *dev = dev_id; 663 struct net_device *dev = dev_id;
664 struct net_local *lp = (struct net_local *) netdev_priv(dev); 664 struct net_local *lp = netdev_priv(dev);
665 void __iomem *base_addr = lp->base_addr; 665 void __iomem *base_addr = lp->base_addr;
666 u32 tx_status; 666 u32 tx_status;
667 667
@@ -918,7 +918,7 @@ void xemaclite_adjust_link(struct net_device *ndev)
918 */ 918 */
919static int xemaclite_open(struct net_device *dev) 919static int xemaclite_open(struct net_device *dev)
920{ 920{
921 struct net_local *lp = (struct net_local *) netdev_priv(dev); 921 struct net_local *lp = netdev_priv(dev);
922 int retval; 922 int retval;
923 923
924 /* Just to be safe, stop the device first */ 924 /* Just to be safe, stop the device first */
@@ -987,7 +987,7 @@ static int xemaclite_open(struct net_device *dev)
987 */ 987 */
988static int xemaclite_close(struct net_device *dev) 988static int xemaclite_close(struct net_device *dev)
989{ 989{
990 struct net_local *lp = (struct net_local *) netdev_priv(dev); 990 struct net_local *lp = netdev_priv(dev);
991 991
992 netif_stop_queue(dev); 992 netif_stop_queue(dev);
993 xemaclite_disable_interrupts(lp); 993 xemaclite_disable_interrupts(lp);
@@ -1001,21 +1001,6 @@ static int xemaclite_close(struct net_device *dev)
1001} 1001}
1002 1002
1003/** 1003/**
1004 * xemaclite_get_stats - Get the stats for the net_device
1005 * @dev: Pointer to the network device
1006 *
1007 * This function returns the address of the 'net_device_stats' structure for the
1008 * given network device. This structure holds usage statistics for the network
1009 * device.
1010 *
1011 * Return: Pointer to the net_device_stats structure.
1012 */
1013static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
1014{
1015 return &dev->stats;
1016}
1017
1018/**
1019 * xemaclite_send - Transmit a frame 1004 * xemaclite_send - Transmit a frame
1020 * @orig_skb: Pointer to the socket buffer to be transmitted 1005 * @orig_skb: Pointer to the socket buffer to be transmitted
1021 * @dev: Pointer to the network device 1006 * @dev: Pointer to the network device
@@ -1031,7 +1016,7 @@ static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
1031 */ 1016 */
1032static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) 1017static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
1033{ 1018{
1034 struct net_local *lp = (struct net_local *) netdev_priv(dev); 1019 struct net_local *lp = netdev_priv(dev);
1035 struct sk_buff *new_skb; 1020 struct sk_buff *new_skb;
1036 unsigned int len; 1021 unsigned int len;
1037 unsigned long flags; 1022 unsigned long flags;
@@ -1068,7 +1053,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
1068static void xemaclite_remove_ndev(struct net_device *ndev) 1053static void xemaclite_remove_ndev(struct net_device *ndev)
1069{ 1054{
1070 if (ndev) { 1055 if (ndev) {
1071 struct net_local *lp = (struct net_local *) netdev_priv(ndev); 1056 struct net_local *lp = netdev_priv(ndev);
1072 1057
1073 if (lp->base_addr) 1058 if (lp->base_addr)
1074 iounmap((void __iomem __force *) (lp->base_addr)); 1059 iounmap((void __iomem __force *) (lp->base_addr));
@@ -1245,7 +1230,7 @@ static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
1245 struct device *dev = &of_dev->dev; 1230 struct device *dev = &of_dev->dev;
1246 struct net_device *ndev = dev_get_drvdata(dev); 1231 struct net_device *ndev = dev_get_drvdata(dev);
1247 1232
1248 struct net_local *lp = (struct net_local *) netdev_priv(ndev); 1233 struct net_local *lp = netdev_priv(ndev);
1249 1234
1250 /* Un-register the mii_bus, if configured */ 1235 /* Un-register the mii_bus, if configured */
1251 if (lp->has_mdio) { 1236 if (lp->has_mdio) {
@@ -1285,7 +1270,6 @@ static struct net_device_ops xemaclite_netdev_ops = {
1285 .ndo_start_xmit = xemaclite_send, 1270 .ndo_start_xmit = xemaclite_send,
1286 .ndo_set_mac_address = xemaclite_set_mac_address, 1271 .ndo_set_mac_address = xemaclite_set_mac_address,
1287 .ndo_tx_timeout = xemaclite_tx_timeout, 1272 .ndo_tx_timeout = xemaclite_tx_timeout,
1288 .ndo_get_stats = xemaclite_get_stats,
1289#ifdef CONFIG_NET_POLL_CONTROLLER 1273#ifdef CONFIG_NET_POLL_CONTROLLER
1290 .ndo_poll_controller = xemaclite_poll_controller, 1274 .ndo_poll_controller = xemaclite_poll_controller,
1291#endif 1275#endif
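
Two independent cleanups run through the xilinx_emaclite hunks: netdev_priv() returns void *, so the (struct net_local *) casts add nothing, and xemaclite_get_stats() only returned &dev->stats, which is what the networking core already uses when a driver provides no .ndo_get_stats, so both the helper and its ops entry can go. A kernel-style sketch (illustrative names, not a standalone program) of what a minimal ops table looks like after such a cleanup:

    static const struct net_device_ops example_netdev_ops = {
            .ndo_open            = example_open,
            .ndo_stop            = example_close,
            .ndo_start_xmit      = example_send,
            .ndo_set_mac_address = example_set_mac_address,
            .ndo_tx_timeout      = example_tx_timeout,
            /* no .ndo_get_stats: the core falls back to dev->stats,
             * which is all the removed helper returned anyway */
    };
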
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index c3a32920451..ae07b3dfbcc 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -124,7 +124,7 @@ MODULE_LICENSE("GPL");
124#define TX_BUF_SIZE 8192 124#define TX_BUF_SIZE 8192
125#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */ 125#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */
126 126
127#define TX_TIMEOUT 10 127#define TX_TIMEOUT (HZ/10)
128 128
129struct znet_private { 129struct znet_private {
130 int rx_dma, tx_dma; 130 int rx_dma, tx_dma;
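
The znet change replaces a bare jiffy count with an HZ-relative one: TX_TIMEOUT is presumably fed to the netdev watchdog, which counts in jiffies, so a literal 10 meant 100 ms only on HZ=100 kernels and shrank to 10 ms at HZ=1000, whereas HZ/10 is 100 ms regardless of the configured tick rate. The arithmetic, worked through for a few common HZ values:

    #include <stdio.h>

    int main(void)
    {
            const int hz_values[] = { 100, 250, 1000 };

            for (int i = 0; i < 3; i++) {
                    int hz = hz_values[i];
                    int old_ms = 10 * 1000 / hz;          /* old: literal 10 jiffies */
                    int new_ms = (hz / 10) * 1000 / hz;   /* new: HZ/10 jiffies */
                    printf("HZ=%4d  old TX_TIMEOUT=%3d ms  new TX_TIMEOUT=%3d ms\n",
                           hz, old_ms, new_ms);
            }
            return 0;
    }
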
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 0f19d540b65..c9f13b9ea33 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1188,7 +1188,8 @@ lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1188 spin_lock_irqsave(&card->ipm_lock, flags); 1188 spin_lock_irqsave(&card->ipm_lock, flags);
1189 list_for_each(l, &card->ipm_list) { 1189 list_for_each(l, &card->ipm_list) {
1190 ipm = list_entry(l, struct lcs_ipm_list, list); 1190 ipm = list_entry(l, struct lcs_ipm_list, list);
1191 for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) { 1191 for (im4 = rcu_dereference(in4_dev->mc_list);
1192 im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
1192 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); 1193 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1193 if ( (ipm->ipm.ip_addr == im4->multiaddr) && 1194 if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
1194 (memcmp(buf, &ipm->ipm.mac_addr, 1195 (memcmp(buf, &ipm->ipm.mac_addr,
@@ -1233,7 +1234,8 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1233 unsigned long flags; 1234 unsigned long flags;
1234 1235
1235 LCS_DBF_TEXT(4, trace, "setmclst"); 1236 LCS_DBF_TEXT(4, trace, "setmclst");
1236 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { 1237 for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
1238 im4 = rcu_dereference(im4->next_rcu)) {
1237 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); 1239 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1238 ipm = lcs_check_addr_entry(card, im4, buf); 1240 ipm = lcs_check_addr_entry(card, im4, buf);
1239 if (ipm != NULL) 1241 if (ipm != NULL)
@@ -1269,10 +1271,10 @@ lcs_register_mc_addresses(void *data)
1269 in4_dev = in_dev_get(card->dev); 1271 in4_dev = in_dev_get(card->dev);
1270 if (in4_dev == NULL) 1272 if (in4_dev == NULL)
1271 goto out; 1273 goto out;
1272 read_lock(&in4_dev->mc_list_lock); 1274 rcu_read_lock();
1273 lcs_remove_mc_addresses(card,in4_dev); 1275 lcs_remove_mc_addresses(card,in4_dev);
1274 lcs_set_mc_addresses(card, in4_dev); 1276 lcs_set_mc_addresses(card, in4_dev);
1275 read_unlock(&in4_dev->mc_list_lock); 1277 rcu_read_unlock();
1276 in_dev_put(in4_dev); 1278 in_dev_put(in4_dev);
1277 1279
1278 netif_carrier_off(card->dev); 1280 netif_carrier_off(card->dev);
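
lcs.c (like the qeth_l3 hunks further down) stops taking in_dev->mc_list_lock and instead walks the IPv4 multicast list under RCU: the read side becomes rcu_read_lock()/rcu_read_unlock() and every pointer hop goes through rcu_dereference(), including the new next_rcu link. Condensed to its skeleton, the pattern the patch applies looks like this (kernel-style sketch, needing the usual inetdevice/igmp/rcupdate headers, not a standalone program):

    static void walk_mc_list(struct in_device *in4_dev)
    {
            struct ip_mc_list *im4;

            rcu_read_lock();
            for (im4 = rcu_dereference(in4_dev->mc_list);
                 im4 != NULL;
                 im4 = rcu_dereference(im4->next_rcu)) {
                    /* look at im4->multiaddr here; do not sleep and do not
                     * keep the pointer after rcu_read_unlock() */
            }
            rcu_read_unlock();
    }

In lcs_register_mc_addresses() the rcu_read_lock()/rcu_read_unlock() pair simply replaces the old read_lock/read_unlock of mc_list_lock around the same two calls.
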
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 42fa783a70c..b5e967cf7e2 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -372,7 +372,7 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev,
372 i = simple_strtoul(buf, &tmp, 16); 372 i = simple_strtoul(buf, &tmp, 16);
373 if ((i == 0) || (i == 1)) { 373 if ((i == 0) || (i == 1)) {
374 if (i == card->options.performance_stats) 374 if (i == card->options.performance_stats)
375 goto out;; 375 goto out;
376 card->options.performance_stats = i; 376 card->options.performance_stats = i;
377 if (i == 0) 377 if (i == 0)
378 memset(&card->perf_stats, 0, 378 memset(&card->perf_stats, 0,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 847e8797073..7a7a1b66478 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -849,8 +849,6 @@ static int qeth_l2_open(struct net_device *dev)
849 card->state = CARD_STATE_UP; 849 card->state = CARD_STATE_UP;
850 netif_start_queue(dev); 850 netif_start_queue(dev);
851 851
852 if (!card->lan_online && netif_carrier_ok(dev))
853 netif_carrier_off(dev);
854 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { 852 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
855 napi_enable(&card->napi); 853 napi_enable(&card->napi);
856 napi_schedule(&card->napi); 854 napi_schedule(&card->napi);
@@ -1013,13 +1011,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1013 dev_warn(&card->gdev->dev, 1011 dev_warn(&card->gdev->dev,
1014 "The LAN is offline\n"); 1012 "The LAN is offline\n");
1015 card->lan_online = 0; 1013 card->lan_online = 0;
1016 goto out; 1014 goto contin;
1017 } 1015 }
1018 rc = -ENODEV; 1016 rc = -ENODEV;
1019 goto out_remove; 1017 goto out_remove;
1020 } else 1018 } else
1021 card->lan_online = 1; 1019 card->lan_online = 1;
1022 1020
1021contin:
1023 if ((card->info.type == QETH_CARD_TYPE_OSD) || 1022 if ((card->info.type == QETH_CARD_TYPE_OSD) ||
1024 (card->info.type == QETH_CARD_TYPE_OSX)) 1023 (card->info.type == QETH_CARD_TYPE_OSX))
1025 /* configure isolation level */ 1024 /* configure isolation level */
@@ -1038,7 +1037,10 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1038 goto out_remove; 1037 goto out_remove;
1039 } 1038 }
1040 card->state = CARD_STATE_SOFTSETUP; 1039 card->state = CARD_STATE_SOFTSETUP;
1041 netif_carrier_on(card->dev); 1040 if (card->lan_online)
1041 netif_carrier_on(card->dev);
1042 else
1043 netif_carrier_off(card->dev);
1042 1044
1043 qeth_set_allowed_threads(card, 0xffffffff, 0); 1045 qeth_set_allowed_threads(card, 0xffffffff, 0);
1044 if (recover_flag == CARD_STATE_RECOVER) { 1046 if (recover_flag == CARD_STATE_RECOVER) {
@@ -1055,7 +1057,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1055 } 1057 }
1056 /* let user_space know that device is online */ 1058 /* let user_space know that device is online */
1057 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 1059 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
1058out:
1059 mutex_unlock(&card->conf_mutex); 1060 mutex_unlock(&card->conf_mutex);
1060 mutex_unlock(&card->discipline_mutex); 1061 mutex_unlock(&card->discipline_mutex);
1061 return 0; 1062 return 0;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 74d1401a5d5..a1abb37db00 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1796,7 +1796,8 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
1796 char buf[MAX_ADDR_LEN]; 1796 char buf[MAX_ADDR_LEN];
1797 1797
1798 QETH_CARD_TEXT(card, 4, "addmc"); 1798 QETH_CARD_TEXT(card, 4, "addmc");
1799 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { 1799 for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
1800 im4 = rcu_dereference(im4->next_rcu)) {
1800 qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); 1801 qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
1801 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1802 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1802 if (!ipm) 1803 if (!ipm)
@@ -1828,9 +1829,9 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
1828 in_dev = in_dev_get(netdev); 1829 in_dev = in_dev_get(netdev);
1829 if (!in_dev) 1830 if (!in_dev)
1830 continue; 1831 continue;
1831 read_lock(&in_dev->mc_list_lock); 1832 rcu_read_lock();
1832 qeth_l3_add_mc(card, in_dev); 1833 qeth_l3_add_mc(card, in_dev);
1833 read_unlock(&in_dev->mc_list_lock); 1834 rcu_read_unlock();
1834 in_dev_put(in_dev); 1835 in_dev_put(in_dev);
1835 } 1836 }
1836} 1837}
@@ -1843,10 +1844,10 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
1843 in4_dev = in_dev_get(card->dev); 1844 in4_dev = in_dev_get(card->dev);
1844 if (in4_dev == NULL) 1845 if (in4_dev == NULL)
1845 return; 1846 return;
1846 read_lock(&in4_dev->mc_list_lock); 1847 rcu_read_lock();
1847 qeth_l3_add_mc(card, in4_dev); 1848 qeth_l3_add_mc(card, in4_dev);
1848 qeth_l3_add_vlan_mc(card); 1849 qeth_l3_add_vlan_mc(card);
1849 read_unlock(&in4_dev->mc_list_lock); 1850 rcu_read_unlock();
1850 in_dev_put(in4_dev); 1851 in_dev_put(in4_dev);
1851} 1852}
1852 1853
@@ -2938,6 +2939,7 @@ static void qeth_tso_fill_header(struct qeth_card *card,
2938 2939
2939 /*fix header to TSO values ...*/ 2940 /*fix header to TSO values ...*/
2940 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; 2941 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
2942 hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
2941 /*set values which are fix for the first approach ...*/ 2943 /*set values which are fix for the first approach ...*/
2942 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso); 2944 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
2943 hdr->ext.imb_hdr_no = 1; 2945 hdr->ext.imb_hdr_no = 1;
@@ -3176,8 +3178,6 @@ static int qeth_l3_open(struct net_device *dev)
3176 card->state = CARD_STATE_UP; 3178 card->state = CARD_STATE_UP;
3177 netif_start_queue(dev); 3179 netif_start_queue(dev);
3178 3180
3179 if (!card->lan_online && netif_carrier_ok(dev))
3180 netif_carrier_off(dev);
3181 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { 3181 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
3182 napi_enable(&card->napi); 3182 napi_enable(&card->napi);
3183 napi_schedule(&card->napi); 3183 napi_schedule(&card->napi);
@@ -3449,13 +3449,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3449 dev_warn(&card->gdev->dev, 3449 dev_warn(&card->gdev->dev,
3450 "The LAN is offline\n"); 3450 "The LAN is offline\n");
3451 card->lan_online = 0; 3451 card->lan_online = 0;
3452 goto out; 3452 goto contin;
3453 } 3453 }
3454 rc = -ENODEV; 3454 rc = -ENODEV;
3455 goto out_remove; 3455 goto out_remove;
3456 } else 3456 } else
3457 card->lan_online = 1; 3457 card->lan_online = 1;
3458 3458
3459contin:
3459 rc = qeth_l3_setadapter_parms(card); 3460 rc = qeth_l3_setadapter_parms(card);
3460 if (rc) 3461 if (rc)
3461 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3462 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
@@ -3480,10 +3481,13 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3480 goto out_remove; 3481 goto out_remove;
3481 } 3482 }
3482 card->state = CARD_STATE_SOFTSETUP; 3483 card->state = CARD_STATE_SOFTSETUP;
3483 netif_carrier_on(card->dev);
3484 3484
3485 qeth_set_allowed_threads(card, 0xffffffff, 0); 3485 qeth_set_allowed_threads(card, 0xffffffff, 0);
3486 qeth_l3_set_ip_addr_list(card); 3486 qeth_l3_set_ip_addr_list(card);
3487 if (card->lan_online)
3488 netif_carrier_on(card->dev);
3489 else
3490 netif_carrier_off(card->dev);
3487 if (recover_flag == CARD_STATE_RECOVER) { 3491 if (recover_flag == CARD_STATE_RECOVER) {
3488 if (recovery_mode) 3492 if (recovery_mode)
3489 qeth_l3_open(card->dev); 3493 qeth_l3_open(card->dev);
@@ -3496,7 +3500,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3496 } 3500 }
3497 /* let user_space know that device is online */ 3501 /* let user_space know that device is online */
3498 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 3502 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
3499out:
3500 mutex_unlock(&card->conf_mutex); 3503 mutex_unlock(&card->conf_mutex);
3501 mutex_unlock(&card->discipline_mutex); 3504 mutex_unlock(&card->discipline_mutex);
3502 return 0; 3505 return 0;
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 6e88d2b603b..f52966305e0 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -573,37 +573,34 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
573 ssb_dprintk(KERN_DEBUG PFX "SPROM revision %d detected.\n", out->revision); 573 ssb_dprintk(KERN_DEBUG PFX "SPROM revision %d detected.\n", out->revision);
574 memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */ 574 memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */
575 memset(out->et1mac, 0xFF, 6); 575 memset(out->et1mac, 0xFF, 6);
576
576 if ((bus->chip_id & 0xFF00) == 0x4400) { 577 if ((bus->chip_id & 0xFF00) == 0x4400) {
577 /* Workaround: The BCM44XX chip has a stupid revision 578 /* Workaround: The BCM44XX chip has a stupid revision
578 * number stored in the SPROM. 579 * number stored in the SPROM.
579 * Always extract r1. */ 580 * Always extract r1. */
580 out->revision = 1; 581 out->revision = 1;
582 ssb_dprintk(KERN_DEBUG PFX "SPROM treated as revision %d\n", out->revision);
583 }
584
585 switch (out->revision) {
586 case 1:
587 case 2:
588 case 3:
581 sprom_extract_r123(out, in); 589 sprom_extract_r123(out, in);
582 } else if (bus->chip_id == 0x4321) { 590 break;
583 /* the BCM4328 has a chipid == 0x4321 and a rev 4 SPROM */ 591 case 4:
584 out->revision = 4; 592 case 5:
585 sprom_extract_r45(out, in); 593 sprom_extract_r45(out, in);
586 } else { 594 break;
587 switch (out->revision) { 595 case 8:
588 case 1: 596 sprom_extract_r8(out, in);
589 case 2: 597 break;
590 case 3: 598 default:
591 sprom_extract_r123(out, in); 599 ssb_printk(KERN_WARNING PFX "Unsupported SPROM"
592 break; 600 " revision %d detected. Will extract"
593 case 4: 601 " v1\n", out->revision);
594 case 5: 602 out->revision = 1;
595 sprom_extract_r45(out, in); 603 sprom_extract_r123(out, in);
596 break;
597 case 8:
598 sprom_extract_r8(out, in);
599 break;
600 default:
601 ssb_printk(KERN_WARNING PFX "Unsupported SPROM"
602 " revision %d detected. Will extract"
603 " v1\n", out->revision);
604 out->revision = 1;
605 sprom_extract_r123(out, in);
606 }
607 } 604 }
608 605
609 if (out->boardflags_lo == 0xFFFF) 606 if (out->boardflags_lo == 0xFFFF)
@@ -618,7 +615,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
618 struct ssb_sprom *sprom) 615 struct ssb_sprom *sprom)
619{ 616{
620 const struct ssb_sprom *fallback; 617 const struct ssb_sprom *fallback;
621 int err = -ENOMEM; 618 int err;
622 u16 *buf; 619 u16 *buf;
623 620
624 if (!ssb_is_sprom_available(bus)) { 621 if (!ssb_is_sprom_available(bus)) {
@@ -645,7 +642,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
645 642
646 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); 643 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
647 if (!buf) 644 if (!buf)
648 goto out; 645 return -ENOMEM;
649 bus->sprom_size = SSB_SPROMSIZE_WORDS_R123; 646 bus->sprom_size = SSB_SPROMSIZE_WORDS_R123;
650 sprom_do_read(bus, buf); 647 sprom_do_read(bus, buf);
651 err = sprom_check_crc(buf, bus->sprom_size); 648 err = sprom_check_crc(buf, bus->sprom_size);
@@ -655,7 +652,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
655 buf = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16), 652 buf = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16),
656 GFP_KERNEL); 653 GFP_KERNEL);
657 if (!buf) 654 if (!buf)
658 goto out; 655 return -ENOMEM;
659 bus->sprom_size = SSB_SPROMSIZE_WORDS_R4; 656 bus->sprom_size = SSB_SPROMSIZE_WORDS_R4;
660 sprom_do_read(bus, buf); 657 sprom_do_read(bus, buf);
661 err = sprom_check_crc(buf, bus->sprom_size); 658 err = sprom_check_crc(buf, bus->sprom_size);
@@ -677,7 +674,6 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
677 674
678out_free: 675out_free:
679 kfree(buf); 676 kfree(buf);
680out:
681 return err; 677 return err;
682} 678}
683 679
diff --git a/drivers/ssb/pcihost_wrapper.c b/drivers/ssb/pcihost_wrapper.c
index 6536a041d90..f6c8c81a002 100644
--- a/drivers/ssb/pcihost_wrapper.c
+++ b/drivers/ssb/pcihost_wrapper.c
@@ -59,6 +59,7 @@ static int ssb_pcihost_probe(struct pci_dev *dev,
59 struct ssb_bus *ssb; 59 struct ssb_bus *ssb;
60 int err = -ENOMEM; 60 int err = -ENOMEM;
61 const char *name; 61 const char *name;
62 u32 val;
62 63
63 ssb = kzalloc(sizeof(*ssb), GFP_KERNEL); 64 ssb = kzalloc(sizeof(*ssb), GFP_KERNEL);
64 if (!ssb) 65 if (!ssb)
@@ -74,6 +75,12 @@ static int ssb_pcihost_probe(struct pci_dev *dev,
74 goto err_pci_disable; 75 goto err_pci_disable;
75 pci_set_master(dev); 76 pci_set_master(dev);
76 77
78 /* Disable the RETRY_TIMEOUT register (0x41) to keep
79 * PCI Tx retries from interfering with C3 CPU state */
80 pci_read_config_dword(dev, 0x40, &val);
81 if ((val & 0x0000ff00) != 0)
82 pci_write_config_dword(dev, 0x40, val & 0xffff00ff);
83
77 err = ssb_bus_pcibus_register(ssb, dev); 84 err = ssb_bus_pcibus_register(ssb, dev);
78 if (err) 85 if (err)
79 goto err_pci_release_regions; 86 goto err_pci_release_regions;
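
The new pcihost_wrapper hunk reads the 32-bit config dword at offset 0x40, which covers bytes 0x40-0x43, so (val & 0x0000ff00) isolates the RETRY_TIMEOUT byte at 0x41 and writing back val & 0xffff00ff clears only that byte. The masking, worked through on a made-up register value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t val = 0x12348056;   /* hypothetical dword read from offset 0x40 */

            if ((val & 0x0000ff00) != 0) {        /* byte at 0x41 is non-zero (0x80) */
                    uint32_t cleared = val & 0xffff00ff;
                    printf("0x%08x -> 0x%08x (RETRY_TIMEOUT byte cleared)\n",
                           (unsigned)val, (unsigned)cleared);
            }
            return 0;
    }
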
diff --git a/include/linux/average.h b/include/linux/average.h
new file mode 100644
index 00000000000..7706e40f95f
--- /dev/null
+++ b/include/linux/average.h
@@ -0,0 +1,32 @@
1#ifndef _LINUX_AVERAGE_H
2#define _LINUX_AVERAGE_H
3
4#include <linux/kernel.h>
5
6/* Exponentially weighted moving average (EWMA) */
7
8/* For more documentation see lib/average.c */
9
10struct ewma {
11 unsigned long internal;
12 unsigned long factor;
13 unsigned long weight;
14};
15
16extern void ewma_init(struct ewma *avg, unsigned long factor,
17 unsigned long weight);
18
19extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
20
21/**
22 * ewma_read() - Get average value
23 * @avg: Average structure
24 *
25 * Returns the average value held in @avg.
26 */
27static inline unsigned long ewma_read(const struct ewma *avg)
28{
29 return DIV_ROUND_CLOSEST(avg->internal, avg->factor);
30}
31
32#endif /* _LINUX_AVERAGE_H */
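
The new <linux/average.h> exposes a tiny EWMA object: ewma_init() fixes a scaling factor (to keep precision in integer math) and a weight (how slowly the average moves), ewma_add() folds in samples, and ewma_read() divides the scaled internal value back down. The smoothing arithmetic itself lives in lib/average.c, which is not part of this hunk. A kernel-style sketch of how a caller is expected to use it; the factor/weight of 1024/8 is only an illustrative choice, not something this header mandates:

    #include <linux/average.h>

    static struct ewma rssi_avg;

    static void rssi_init(void)
    {
            ewma_init(&rssi_avg, 1024, 8);
    }

    static void rssi_sample(unsigned long rssi)
    {
            ewma_add(&rssi_avg, rssi);
    }

    static unsigned long rssi_current(void)
    {
            return ewma_read(&rssi_avg);   /* DIV_ROUND_CLOSEST(internal, factor) */
    }
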
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 827cc95711e..2184c6b97ae 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -109,6 +109,17 @@ static inline __u8 ror8(__u8 word, unsigned int shift)
109 return (word >> shift) | (word << (8 - shift)); 109 return (word >> shift) | (word << (8 - shift));
110} 110}
111 111
112/**
113 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
114 * @value: value to sign extend
115 * @index: 0 based bit index (0<=index<32) to sign bit
116 */
117static inline __s32 sign_extend32(__u32 value, int index)
118{
119 __u8 shift = 31 - index;
120 return (__s32)(value << shift) >> shift;
121}
122
112static inline unsigned fls_long(unsigned long l) 123static inline unsigned fls_long(unsigned long l)
113{ 124{
114 if (sizeof(l) == 4) 125 if (sizeof(l) == 4)
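
A quick worked example of the new sign_extend32() helper, with illustrative values: decoding a 12-bit two's-complement field whose sign bit is bit 11.

/* Hedged example: 0x800 has bit 11 set, so it decodes to -2048;
 * 0x7ff has bit 11 clear and stays +2047. */
s32 lo = sign_extend32(0x800, 11);	/* -2048 */
s32 hi = sign_extend32(0x7ff, 11);	/* +2047 */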
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 749f01ccd26..010e2d87ed7 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -197,6 +197,21 @@ enum dccp_feature_numbers {
197 DCCPF_MAX_CCID_SPECIFIC = 255, 197 DCCPF_MAX_CCID_SPECIFIC = 255,
198}; 198};
199 199
200/* DCCP socket control message types for cmsg */
201enum dccp_cmsg_type {
202 DCCP_SCM_PRIORITY = 1,
203 DCCP_SCM_QPOLICY_MAX = 0xFFFF,
 204 /* ^-- Up to here reserved exclusively for qpolicy parameters */
205 DCCP_SCM_MAX
206};
207
208/* DCCP priorities for outgoing/queued packets */
209enum dccp_packet_dequeueing_policy {
210 DCCPQ_POLICY_SIMPLE,
211 DCCPQ_POLICY_PRIO,
212 DCCPQ_POLICY_MAX
213};
214
200/* DCCP socket options */ 215/* DCCP socket options */
201#define DCCP_SOCKOPT_PACKET_SIZE 1 /* XXX deprecated, without effect */ 216#define DCCP_SOCKOPT_PACKET_SIZE 1 /* XXX deprecated, without effect */
202#define DCCP_SOCKOPT_SERVICE 2 217#define DCCP_SOCKOPT_SERVICE 2
@@ -210,6 +225,8 @@ enum dccp_feature_numbers {
210#define DCCP_SOCKOPT_CCID 13 225#define DCCP_SOCKOPT_CCID 13
211#define DCCP_SOCKOPT_TX_CCID 14 226#define DCCP_SOCKOPT_TX_CCID 14
212#define DCCP_SOCKOPT_RX_CCID 15 227#define DCCP_SOCKOPT_RX_CCID 15
228#define DCCP_SOCKOPT_QPOLICY_ID 16
229#define DCCP_SOCKOPT_QPOLICY_TXQLEN 17
213#define DCCP_SOCKOPT_CCID_RX_INFO 128 230#define DCCP_SOCKOPT_CCID_RX_INFO 128
214#define DCCP_SOCKOPT_CCID_TX_INFO 192 231#define DCCP_SOCKOPT_CCID_TX_INFO 192
215 232
@@ -458,10 +475,13 @@ struct dccp_ackvec;
458 * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) 475 * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
459 * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection) 476 * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
460 * @dccps_options_received - parsed set of retrieved options 477 * @dccps_options_received - parsed set of retrieved options
478 * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy
479 * @dccps_tx_qlen - maximum length of the TX queue
461 * @dccps_role - role of this sock, one of %dccp_role 480 * @dccps_role - role of this sock, one of %dccp_role
462 * @dccps_hc_rx_insert_options - receiver wants to add options when acking 481 * @dccps_hc_rx_insert_options - receiver wants to add options when acking
463 * @dccps_hc_tx_insert_options - sender wants to add options when sending 482 * @dccps_hc_tx_insert_options - sender wants to add options when sending
464 * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3) 483 * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
484 * @dccps_sync_scheduled - flag which signals "send out-of-band message soon"
465 * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets 485 * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
466 * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing) 486 * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
467 * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs) 487 * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
@@ -499,10 +519,13 @@ struct dccp_sock {
499 struct ccid *dccps_hc_rx_ccid; 519 struct ccid *dccps_hc_rx_ccid;
500 struct ccid *dccps_hc_tx_ccid; 520 struct ccid *dccps_hc_tx_ccid;
501 struct dccp_options_received dccps_options_received; 521 struct dccp_options_received dccps_options_received;
522 __u8 dccps_qpolicy;
523 __u32 dccps_tx_qlen;
502 enum dccp_role dccps_role:2; 524 enum dccp_role dccps_role:2;
503 __u8 dccps_hc_rx_insert_options:1; 525 __u8 dccps_hc_rx_insert_options:1;
504 __u8 dccps_hc_tx_insert_options:1; 526 __u8 dccps_hc_tx_insert_options:1;
505 __u8 dccps_server_timewait:1; 527 __u8 dccps_server_timewait:1;
528 __u8 dccps_sync_scheduled:1;
506 struct tasklet_struct dccps_xmitlet; 529 struct tasklet_struct dccps_xmitlet;
507 struct timer_list dccps_xmit_timer; 530 struct timer_list dccps_xmit_timer;
508}; 531};
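
A minimal userspace sketch of the new qpolicy knobs, assuming they are set with plain integers through setsockopt() like the existing DCCP socket options; the SOL_DCCP fallback define and the queue length of 32 are illustrative assumptions, not part of this patch.

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269	/* DCCP socket level, as defined by the kernel headers */
#endif

int main(void)
{
	int s = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
	int policy = DCCPQ_POLICY_PRIO;	/* dequeue by per-packet priority */
	int qlen = 32;			/* cap the TX queue at 32 packets */

	if (s < 0)
		return 1;
	setsockopt(s, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID, &policy, sizeof(policy));
	setsockopt(s, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_TXQLEN, &qlen, sizeof(qlen));
	return 0;
}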
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 69b43dbea6c..45266b75409 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -91,54 +91,6 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
91#define BPF_TAX 0x00 91#define BPF_TAX 0x00
92#define BPF_TXA 0x80 92#define BPF_TXA 0x80
93 93
94enum {
95 BPF_S_RET_K = 0,
96 BPF_S_RET_A,
97 BPF_S_ALU_ADD_K,
98 BPF_S_ALU_ADD_X,
99 BPF_S_ALU_SUB_K,
100 BPF_S_ALU_SUB_X,
101 BPF_S_ALU_MUL_K,
102 BPF_S_ALU_MUL_X,
103 BPF_S_ALU_DIV_X,
104 BPF_S_ALU_AND_K,
105 BPF_S_ALU_AND_X,
106 BPF_S_ALU_OR_K,
107 BPF_S_ALU_OR_X,
108 BPF_S_ALU_LSH_K,
109 BPF_S_ALU_LSH_X,
110 BPF_S_ALU_RSH_K,
111 BPF_S_ALU_RSH_X,
112 BPF_S_ALU_NEG,
113 BPF_S_LD_W_ABS,
114 BPF_S_LD_H_ABS,
115 BPF_S_LD_B_ABS,
116 BPF_S_LD_W_LEN,
117 BPF_S_LD_W_IND,
118 BPF_S_LD_H_IND,
119 BPF_S_LD_B_IND,
120 BPF_S_LD_IMM,
121 BPF_S_LDX_W_LEN,
122 BPF_S_LDX_B_MSH,
123 BPF_S_LDX_IMM,
124 BPF_S_MISC_TAX,
125 BPF_S_MISC_TXA,
126 BPF_S_ALU_DIV_K,
127 BPF_S_LD_MEM,
128 BPF_S_LDX_MEM,
129 BPF_S_ST,
130 BPF_S_STX,
131 BPF_S_JMP_JA,
132 BPF_S_JMP_JEQ_K,
133 BPF_S_JMP_JEQ_X,
134 BPF_S_JMP_JGE_K,
135 BPF_S_JMP_JGE_X,
136 BPF_S_JMP_JGT_K,
137 BPF_S_JMP_JGT_X,
138 BPF_S_JMP_JSET_K,
139 BPF_S_JMP_JSET_X,
140};
141
142#ifndef BPF_MAXINSNS 94#ifndef BPF_MAXINSNS
143#define BPF_MAXINSNS 4096 95#define BPF_MAXINSNS 4096
144#endif 96#endif
@@ -172,7 +124,9 @@ enum {
172#define SKF_AD_MARK 20 124#define SKF_AD_MARK 20
173#define SKF_AD_QUEUE 24 125#define SKF_AD_QUEUE 24
174#define SKF_AD_HATYPE 28 126#define SKF_AD_HATYPE 28
175#define SKF_AD_MAX 32 127#define SKF_AD_RXHASH 32
128#define SKF_AD_CPU 36
129#define SKF_AD_MAX 40
176#define SKF_NET_OFF (-0x100000) 130#define SKF_NET_OFF (-0x100000)
177#define SKF_LL_OFF (-0x200000) 131#define SKF_LL_OFF (-0x200000)
178 132
@@ -194,8 +148,8 @@ struct sk_buff;
194struct sock; 148struct sock;
195 149
196extern int sk_filter(struct sock *sk, struct sk_buff *skb); 150extern int sk_filter(struct sock *sk, struct sk_buff *skb);
197extern unsigned int sk_run_filter(struct sk_buff *skb, 151extern unsigned int sk_run_filter(const struct sk_buff *skb,
198 struct sock_filter *filter, int flen); 152 const struct sock_filter *filter);
199extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); 153extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
200extern int sk_detach_filter(struct sock *sk); 154extern int sk_detach_filter(struct sock *sk);
201extern int sk_chk_filter(struct sock_filter *filter, int flen); 155extern int sk_chk_filter(struct sock_filter *filter, int flen);
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 0d241a5c490..f7e73c338c4 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -102,7 +102,9 @@ struct __fdb_entry {
102#include <linux/netdevice.h> 102#include <linux/netdevice.h>
103 103
104extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); 104extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
105extern int (*br_should_route_hook)(struct sk_buff *skb); 105
106typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
107extern br_should_route_hook_t __rcu *br_should_route_hook;
106 108
107#endif 109#endif
108 110
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 2fc66dd783e..6485d2a89be 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -80,6 +80,24 @@ struct rtnl_link_ifmap {
80 __u8 port; 80 __u8 port;
81}; 81};
82 82
83/*
84 * IFLA_AF_SPEC
85 * Contains nested attributes for address family specific attributes.
 86 * Each address family may create an attribute with the address family
87 * number as type and create its own attribute structure in it.
88 *
89 * Example:
90 * [IFLA_AF_SPEC] = {
91 * [AF_INET] = {
92 * [IFLA_INET_CONF] = ...,
93 * },
94 * [AF_INET6] = {
95 * [IFLA_INET6_FLAGS] = ...,
96 * [IFLA_INET6_CONF] = ...,
97 * }
98 * }
99 */
100
83enum { 101enum {
84 IFLA_UNSPEC, 102 IFLA_UNSPEC,
85 IFLA_ADDRESS, 103 IFLA_ADDRESS,
@@ -116,6 +134,7 @@ enum {
116 IFLA_STATS64, 134 IFLA_STATS64,
117 IFLA_VF_PORTS, 135 IFLA_VF_PORTS,
118 IFLA_PORT_SELF, 136 IFLA_PORT_SELF,
137 IFLA_AF_SPEC,
119 __IFLA_MAX 138 __IFLA_MAX
120}; 139};
121 140
@@ -128,6 +147,14 @@ enum {
128#define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg)) 147#define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg))
129#endif 148#endif
130 149
150enum {
151 IFLA_INET_UNSPEC,
152 IFLA_INET_CONF,
153 __IFLA_INET_MAX,
154};
155
156#define IFLA_INET_MAX (__IFLA_INET_MAX - 1)
157
131/* ifi_flags. 158/* ifi_flags.
132 159
133 IFF_* flags. 160 IFF_* flags.
@@ -232,6 +259,7 @@ enum macvlan_mode {
232 MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */ 259 MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */
233 MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */ 260 MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */
234 MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ 261 MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */
262 MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */
235}; 263};
236 264
237/* SR-IOV virtual function management section */ 265/* SR-IOV virtual function management section */
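
To make the nesting layout in the IFLA_AF_SPEC comment concrete, here is a hedged kernel-side sketch using the standard netlink nesting helpers; fill_af_spec() is a hypothetical function and the IFLA_INET_CONF payload is elided.

#include <net/netlink.h>

/* Hedged sketch: build an IFLA_AF_SPEC container holding one AF_INET
 * block, mirroring the layout documented above. */
static int fill_af_spec(struct sk_buff *skb)
{
	struct nlattr *af_spec, *af;

	af_spec = nla_nest_start(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	af = nla_nest_start(skb, AF_INET);
	if (!af) {
		nla_nest_cancel(skb, af_spec);
		return -EMSGSIZE;
	}
	/* nla_put(skb, IFLA_INET_CONF, ...) would go here */
	nla_nest_end(skb, af);

	nla_nest_end(skb, af_spec);
	return 0;
}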
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 8a2fd66a8b5..e28b2e4959d 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -25,19 +25,25 @@ struct macvlan_port;
25struct macvtap_queue; 25struct macvtap_queue;
26 26
27/** 27/**
28 * struct macvlan_rx_stats - MACVLAN percpu rx stats 28 * struct macvlan_pcpu_stats - MACVLAN percpu stats
29 * @rx_packets: number of received packets 29 * @rx_packets: number of received packets
30 * @rx_bytes: number of received bytes 30 * @rx_bytes: number of received bytes
31 * @rx_multicast: number of received multicast packets 31 * @rx_multicast: number of received multicast packets
32 * @tx_packets: number of transmitted packets
33 * @tx_bytes: number of transmitted bytes
32 * @syncp: synchronization point for 64bit counters 34 * @syncp: synchronization point for 64bit counters
33 * @rx_errors: number of errors 35 * @rx_errors: number of rx errors
36 * @tx_dropped: number of tx dropped packets
34 */ 37 */
35struct macvlan_rx_stats { 38struct macvlan_pcpu_stats {
36 u64 rx_packets; 39 u64 rx_packets;
37 u64 rx_bytes; 40 u64 rx_bytes;
38 u64 rx_multicast; 41 u64 rx_multicast;
42 u64 tx_packets;
43 u64 tx_bytes;
39 struct u64_stats_sync syncp; 44 struct u64_stats_sync syncp;
40 unsigned long rx_errors; 45 u32 rx_errors;
46 u32 tx_dropped;
41}; 47};
42 48
43/* 49/*
@@ -52,7 +58,7 @@ struct macvlan_dev {
52 struct hlist_node hlist; 58 struct hlist_node hlist;
53 struct macvlan_port *port; 59 struct macvlan_port *port;
54 struct net_device *lowerdev; 60 struct net_device *lowerdev;
55 struct macvlan_rx_stats __percpu *rx_stats; 61 struct macvlan_pcpu_stats __percpu *pcpu_stats;
56 enum macvlan_mode mode; 62 enum macvlan_mode mode;
57 int (*receive)(struct sk_buff *skb); 63 int (*receive)(struct sk_buff *skb);
58 int (*forward)(struct net_device *dev, struct sk_buff *skb); 64 int (*forward)(struct net_device *dev, struct sk_buff *skb);
@@ -64,18 +70,18 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
64 unsigned int len, bool success, 70 unsigned int len, bool success,
65 bool multicast) 71 bool multicast)
66{ 72{
67 struct macvlan_rx_stats *rx_stats;
68
69 rx_stats = this_cpu_ptr(vlan->rx_stats);
70 if (likely(success)) { 73 if (likely(success)) {
71 u64_stats_update_begin(&rx_stats->syncp); 74 struct macvlan_pcpu_stats *pcpu_stats;
72 rx_stats->rx_packets++;; 75
73 rx_stats->rx_bytes += len; 76 pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
77 u64_stats_update_begin(&pcpu_stats->syncp);
78 pcpu_stats->rx_packets++;
79 pcpu_stats->rx_bytes += len;
74 if (multicast) 80 if (multicast)
75 rx_stats->rx_multicast++; 81 pcpu_stats->rx_multicast++;
76 u64_stats_update_end(&rx_stats->syncp); 82 u64_stats_update_end(&pcpu_stats->syncp);
77 } else { 83 } else {
78 rx_stats->rx_errors++; 84 this_cpu_inc(vlan->pcpu_stats->rx_errors);
79 } 85 }
80} 86}
81 87
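
The read side of this per-cpu scheme is not shown in the header; below is a hedged sketch of how a stats aggregator would sum the counters under the u64_stats seqcount (macvlan_sum_rx() is a hypothetical helper, not code from this patch).

/* Hedged sketch: sum per-cpu RX counters without tearing 64-bit values
 * on 32-bit hosts. */
static void macvlan_sum_rx(const struct macvlan_dev *vlan,
			   u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct macvlan_pcpu_stats *p;
		unsigned int start;
		u64 pk, by;

		p = per_cpu_ptr(vlan->pcpu_stats, cpu);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			pk = p->rx_packets;
			by = p->rx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		*packets += pk;
		*bytes += by;
	}
}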
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 93fc2449af1..c4987f26510 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -167,10 +167,10 @@ struct ip_sf_socklist {
167 */ 167 */
168 168
169struct ip_mc_socklist { 169struct ip_mc_socklist {
170 struct ip_mc_socklist *next; 170 struct ip_mc_socklist __rcu *next_rcu;
171 struct ip_mreqn multi; 171 struct ip_mreqn multi;
172 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ 172 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
173 struct ip_sf_socklist *sflist; 173 struct ip_sf_socklist __rcu *sflist;
174 struct rcu_head rcu; 174 struct rcu_head rcu;
175}; 175};
176 176
@@ -186,11 +186,14 @@ struct ip_sf_list {
186struct ip_mc_list { 186struct ip_mc_list {
187 struct in_device *interface; 187 struct in_device *interface;
188 __be32 multiaddr; 188 __be32 multiaddr;
189 unsigned int sfmode;
189 struct ip_sf_list *sources; 190 struct ip_sf_list *sources;
190 struct ip_sf_list *tomb; 191 struct ip_sf_list *tomb;
191 unsigned int sfmode;
192 unsigned long sfcount[2]; 192 unsigned long sfcount[2];
193 struct ip_mc_list *next; 193 union {
194 struct ip_mc_list *next;
195 struct ip_mc_list __rcu *next_rcu;
196 };
194 struct timer_list timer; 197 struct timer_list timer;
195 int users; 198 int users;
196 atomic_t refcnt; 199 atomic_t refcnt;
@@ -201,6 +204,7 @@ struct ip_mc_list {
201 char loaded; 204 char loaded;
202 unsigned char gsquery; /* check source marks? */ 205 unsigned char gsquery; /* check source marks? */
203 unsigned char crcount; 206 unsigned char crcount;
207 struct rcu_head rcu;
204}; 208};
205 209
206/* V3 exponential field decoding */ 210/* V3 exponential field decoding */
@@ -234,7 +238,7 @@ extern void ip_mc_unmap(struct in_device *);
234extern void ip_mc_remap(struct in_device *); 238extern void ip_mc_remap(struct in_device *);
235extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); 239extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
236extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); 240extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
237extern void ip_mc_rejoin_group(struct ip_mc_list *im); 241extern void ip_mc_rejoin_groups(struct in_device *in_dev);
238 242
239#endif 243#endif
240#endif 244#endif
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ccd5b07d678..ae8fdc54e0c 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -41,10 +41,12 @@ enum
41 __IPV4_DEVCONF_MAX 41 __IPV4_DEVCONF_MAX
42}; 42};
43 43
44#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
45
44struct ipv4_devconf { 46struct ipv4_devconf {
45 void *sysctl; 47 void *sysctl;
46 int data[__IPV4_DEVCONF_MAX - 1]; 48 int data[IPV4_DEVCONF_MAX];
47 DECLARE_BITMAP(state, __IPV4_DEVCONF_MAX - 1); 49 DECLARE_BITMAP(state, IPV4_DEVCONF_MAX);
48}; 50};
49 51
50struct in_device { 52struct in_device {
@@ -52,9 +54,8 @@ struct in_device {
52 atomic_t refcnt; 54 atomic_t refcnt;
53 int dead; 55 int dead;
54 struct in_ifaddr *ifa_list; /* IP ifaddr chain */ 56 struct in_ifaddr *ifa_list; /* IP ifaddr chain */
55 rwlock_t mc_list_lock; 57 struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */
56 struct ip_mc_list *mc_list; /* IP multicast filter chain */ 58 int mc_count; /* Number of installed mcasts */
57 int mc_count; /* Number of installed mcasts */
58 spinlock_t mc_tomb_lock; 59 spinlock_t mc_tomb_lock;
59 struct ip_mc_list *mc_tomb; 60 struct ip_mc_list *mc_tomb;
60 unsigned long mr_v1_seen; 61 unsigned long mr_v1_seen;
@@ -91,7 +92,7 @@ static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
91 92
92static inline void ipv4_devconf_setall(struct in_device *in_dev) 93static inline void ipv4_devconf_setall(struct in_device *in_dev)
93{ 94{
94 bitmap_fill(in_dev->cnf.state, __IPV4_DEVCONF_MAX - 1); 95 bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX);
95} 96}
96 97
97#define IN_DEV_CONF_GET(in_dev, attr) \ 98#define IN_DEV_CONF_GET(in_dev, attr) \
@@ -221,7 +222,7 @@ static inline struct in_device *in_dev_get(const struct net_device *dev)
221 222
222static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) 223static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
223{ 224{
224 return rcu_dereference_check(dev->ip_ptr, lockdep_rtnl_is_held()); 225 return rtnl_dereference(dev->ip_ptr);
225} 226}
226 227
227extern void in_dev_finish_destroy(struct in_device *idev); 228extern void in_dev_finish_destroy(struct in_device *idev);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 8e429d0e040..0c997767429 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -364,7 +364,7 @@ struct ipv6_pinfo {
364 364
365 __u32 dst_cookie; 365 __u32 dst_cookie;
366 366
367 struct ipv6_mc_socklist *ipv6_mc_list; 367 struct ipv6_mc_socklist __rcu *ipv6_mc_list;
368 struct ipv6_ac_socklist *ipv6_ac_list; 368 struct ipv6_ac_socklist *ipv6_ac_list;
369 struct ipv6_fl_socklist *ipv6_fl_list; 369 struct ipv6_fl_socklist *ipv6_fl_list;
370 370
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index c779b49a1fd..b1494aced21 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -55,6 +55,7 @@
55#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */ 55#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
56#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */ 56#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
57#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */ 57#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
58#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
58 59
59/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */ 60/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
60#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */ 61#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */
@@ -235,6 +236,10 @@
235#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */ 236#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */
236#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */ 237#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */
237 238
239/* AN EEE Advertisement register. */
240#define MDIO_AN_EEE_ADV_100TX 0x0002 /* Advertise 100TX EEE cap */
241#define MDIO_AN_EEE_ADV_1000T 0x0004 /* Advertise 1000T EEE cap */
242
238/* LASI RX_ALARM control/status registers. */ 243/* LASI RX_ALARM control/status registers. */
239#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */ 244#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */
240#define MDIO_PMA_LASI_RX_PCSLFLT 0x0008 /* PCS RX local fault */ 245#define MDIO_PMA_LASI_RX_PCSLFLT 0x0008 /* PCS RX local fault */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d8fd2c23a1b..d31bc3c9471 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -493,6 +493,8 @@ static inline void napi_synchronize(const struct napi_struct *n)
493enum netdev_queue_state_t { 493enum netdev_queue_state_t {
494 __QUEUE_STATE_XOFF, 494 __QUEUE_STATE_XOFF,
495 __QUEUE_STATE_FROZEN, 495 __QUEUE_STATE_FROZEN,
496#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
497 (1 << __QUEUE_STATE_FROZEN))
496}; 498};
497 499
498struct netdev_queue { 500struct netdev_queue {
@@ -503,6 +505,12 @@ struct netdev_queue {
503 struct Qdisc *qdisc; 505 struct Qdisc *qdisc;
504 unsigned long state; 506 unsigned long state;
505 struct Qdisc *qdisc_sleeping; 507 struct Qdisc *qdisc_sleeping;
508#ifdef CONFIG_RPS
509 struct kobject kobj;
510#endif
511#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
512 int numa_node;
513#endif
506/* 514/*
507 * write mostly part 515 * write mostly part
508 */ 516 */
@@ -517,6 +525,22 @@ struct netdev_queue {
517 u64 tx_dropped; 525 u64 tx_dropped;
518} ____cacheline_aligned_in_smp; 526} ____cacheline_aligned_in_smp;
519 527
528static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
529{
530#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
531 return q->numa_node;
532#else
533 return -1;
534#endif
535}
536
537static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
538{
539#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
540 q->numa_node = node;
541#endif
542}
543
520#ifdef CONFIG_RPS 544#ifdef CONFIG_RPS
521/* 545/*
522 * This structure holds an RPS map which can be of variable length. The 546 * This structure holds an RPS map which can be of variable length. The
@@ -592,11 +616,36 @@ struct netdev_rx_queue {
592 struct rps_map __rcu *rps_map; 616 struct rps_map __rcu *rps_map;
593 struct rps_dev_flow_table __rcu *rps_flow_table; 617 struct rps_dev_flow_table __rcu *rps_flow_table;
594 struct kobject kobj; 618 struct kobject kobj;
595 struct netdev_rx_queue *first; 619 struct net_device *dev;
596 atomic_t count;
597} ____cacheline_aligned_in_smp; 620} ____cacheline_aligned_in_smp;
598#endif /* CONFIG_RPS */ 621#endif /* CONFIG_RPS */
599 622
623#ifdef CONFIG_XPS
624/*
625 * This structure holds an XPS map which can be of variable length. The
626 * map is an array of queues.
627 */
628struct xps_map {
629 unsigned int len;
630 unsigned int alloc_len;
631 struct rcu_head rcu;
632 u16 queues[0];
633};
634#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
635#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
636 / sizeof(u16))
637
638/*
639 * This structure holds all XPS maps for device. Maps are indexed by CPU.
640 */
641struct xps_dev_maps {
642 struct rcu_head rcu;
643 struct xps_map __rcu *cpu_map[0];
644};
645#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
646 (nr_cpu_ids * sizeof(struct xps_map *)))
647#endif /* CONFIG_XPS */
648
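
A hedged sketch of how one of these maps might be allocated and populated; xps_map_one_queue() is a hypothetical helper shown only to illustrate the len/alloc_len/queues[] layout.

/* Hedged sketch: allocate a minimal XPS map and record a single TX queue. */
static struct xps_map *xps_map_one_queue(u16 queue_index)
{
	struct xps_map *map;

	map = kzalloc(XPS_MAP_SIZE(XPS_MIN_MAP_ALLOC), GFP_KERNEL);
	if (!map)
		return NULL;

	map->alloc_len = XPS_MIN_MAP_ALLOC;
	map->queues[map->len++] = queue_index;
	return map;
}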
600/* 649/*
601 * This structure defines the management hooks for network devices. 650 * This structure defines the management hooks for network devices.
602 * The following hooks can be defined; unless noted otherwise, they are 651 * The following hooks can be defined; unless noted otherwise, they are
@@ -951,7 +1000,7 @@ struct net_device {
951#endif 1000#endif
952 void *atalk_ptr; /* AppleTalk link */ 1001 void *atalk_ptr; /* AppleTalk link */
953 struct in_device __rcu *ip_ptr; /* IPv4 specific data */ 1002 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
954 void *dn_ptr; /* DECnet specific data */ 1003 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
955 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */ 1004 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
956 void *ec_ptr; /* Econet specific data */ 1005 void *ec_ptr; /* Econet specific data */
957 void *ax25_ptr; /* AX.25 specific data */ 1006 void *ax25_ptr; /* AX.25 specific data */
@@ -995,8 +1044,8 @@ struct net_device {
995 unsigned int real_num_rx_queues; 1044 unsigned int real_num_rx_queues;
996#endif 1045#endif
997 1046
998 rx_handler_func_t *rx_handler; 1047 rx_handler_func_t __rcu *rx_handler;
999 void *rx_handler_data; 1048 void __rcu *rx_handler_data;
1000 1049
1001 struct netdev_queue __rcu *ingress_queue; 1050 struct netdev_queue __rcu *ingress_queue;
1002 1051
@@ -1017,6 +1066,10 @@ struct net_device {
1017 unsigned long tx_queue_len; /* Max frames per queue allowed */ 1066 unsigned long tx_queue_len; /* Max frames per queue allowed */
1018 spinlock_t tx_global_lock; 1067 spinlock_t tx_global_lock;
1019 1068
1069#ifdef CONFIG_XPS
1070 struct xps_dev_maps __rcu *xps_maps;
1071#endif
1072
1020 /* These may be needed for future network-power-down code. */ 1073 /* These may be needed for future network-power-down code. */
1021 1074
1022 /* 1075 /*
@@ -1307,7 +1360,8 @@ static inline struct net_device *first_net_device(struct net *net)
1307 1360
1308extern int netdev_boot_setup_check(struct net_device *dev); 1361extern int netdev_boot_setup_check(struct net_device *dev);
1309extern unsigned long netdev_boot_base(const char *prefix, int unit); 1362extern unsigned long netdev_boot_base(const char *prefix, int unit);
1310extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr); 1363extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1364 const char *hwaddr);
1311extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 1365extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1312extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); 1366extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1313extern void dev_add_pack(struct packet_type *pt); 1367extern void dev_add_pack(struct packet_type *pt);
@@ -1600,9 +1654,9 @@ static inline int netif_queue_stopped(const struct net_device *dev)
1600 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); 1654 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
1601} 1655}
1602 1656
1603static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue) 1657static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
1604{ 1658{
1605 return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state); 1659 return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
1606} 1660}
1607 1661
1608/** 1662/**
@@ -2239,6 +2293,8 @@ unsigned long netdev_fix_features(unsigned long features, const char *name);
2239void netif_stacked_transfer_operstate(const struct net_device *rootdev, 2293void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2240 struct net_device *dev); 2294 struct net_device *dev);
2241 2295
2296int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev);
2297
2242static inline int net_gso_ok(int features, int gso_type) 2298static inline int net_gso_ok(int features, int gso_type)
2243{ 2299{
2244 int feature = gso_type << NETIF_F_GSO_SHIFT; 2300 int feature = gso_type << NETIF_F_GSO_SHIFT;
@@ -2254,10 +2310,7 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features)
2254static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 2310static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
2255{ 2311{
2256 if (skb_is_gso(skb)) { 2312 if (skb_is_gso(skb)) {
2257 int features = dev->features; 2313 int features = netif_get_vlan_features(skb, dev);
2258
2259 if (skb->protocol == htons(ETH_P_8021Q) || skb->vlan_tci)
2260 features &= dev->vlan_features;
2261 2314
2262 return (!skb_gso_ok(skb, features) || 2315 return (!skb_gso_ok(skb, features) ||
2263 unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); 2316 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 03317c8d407..1893837b396 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -33,6 +33,8 @@
33 33
34#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE) 34#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE)
35 35
36#define NF_DROP_ERR(x) (((-x) << NF_VERDICT_BITS) | NF_DROP)
37
36/* only for userspace compatibility */ 38/* only for userspace compatibility */
37#ifndef __KERNEL__ 39#ifndef __KERNEL__
38/* Generic cache responses from hook functions. 40/* Generic cache responses from hook functions.
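
For illustration, a hedged sketch of how a verdict site could use the new macro: the caller of the hook then sees the encoded errno instead of the generic -EPERM that a plain NF_DROP yields (the errno choice here is arbitrary).

static unsigned int example_verdict(void)
{
	/* Hedged example: drop the packet and propagate -EHOSTUNREACH. */
	return NF_DROP_ERR(-EHOSTUNREACH);
}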
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 0edb2566c14..d706bf3badc 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -804,6 +804,30 @@ enum nl80211_commands {
804 * @NL80211_ATTR_SUPPORT_IBSS_RSN: The device supports IBSS RSN, which mostly 804 * @NL80211_ATTR_SUPPORT_IBSS_RSN: The device supports IBSS RSN, which mostly
805 * means support for per-station GTKs. 805 * means support for per-station GTKs.
806 * 806 *
807 * @NL80211_ATTR_WIPHY_ANTENNA_TX: Bitmap of allowed antennas for transmitting.
808 * This can be used to mask out antennas which are not attached or should
809 * not be used for transmitting. If an antenna is not selected in this
810 * bitmap the hardware is not allowed to transmit on this antenna.
811 *
812 * Each bit represents one antenna, starting with antenna 1 at the first
813 * bit. Depending on which antennas are selected in the bitmap, 802.11n
814 * drivers can derive which chainmasks to use (if all antennas belonging to
815 * a particular chain are disabled this chain should be disabled) and if
 816 * a chain has diversity antennas, whether diversity should be used or not.
817 * HT capabilities (STBC, TX Beamforming, Antenna selection) can be
818 * derived from the available chains after applying the antenna mask.
 819 * Non-802.11n drivers can derive whether to use diversity or not.
820 * Drivers may reject configurations or RX/TX mask combinations they cannot
821 * support by returning -EINVAL.
822 *
823 * @NL80211_ATTR_WIPHY_ANTENNA_RX: Bitmap of allowed antennas for receiving.
824 * This can be used to mask out antennas which are not attached or should
825 * not be used for receiving. If an antenna is not selected in this bitmap
826 * the hardware should not be configured to receive on this antenna.
 827 * For a more detailed description see @NL80211_ATTR_WIPHY_ANTENNA_TX.
828 *
829 * @NL80211_ATTR_MCAST_RATE: Multicast tx rate (in 100 kbps) for IBSS
830 *
807 * @NL80211_ATTR_MAX: highest attribute number currently defined 831 * @NL80211_ATTR_MAX: highest attribute number currently defined
808 * @__NL80211_ATTR_AFTER_LAST: internal use 832 * @__NL80211_ATTR_AFTER_LAST: internal use
809 */ 833 */
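
To make the bitmap convention above concrete (values are illustrative only):

/* Hedged illustration: bit 0 is antenna 1, bit 1 is antenna 2, and so on. */
u32 tx_ant = BIT(0) | BIT(1);		/* TX allowed only on antennas 1 and 2 */
u32 rx_ant = BIT(0) | BIT(1) | BIT(2);	/* RX may use antennas 1-3 */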
@@ -973,6 +997,11 @@ enum nl80211_attrs {
973 997
974 NL80211_ATTR_SUPPORT_IBSS_RSN, 998 NL80211_ATTR_SUPPORT_IBSS_RSN,
975 999
1000 NL80211_ATTR_WIPHY_ANTENNA_TX,
1001 NL80211_ATTR_WIPHY_ANTENNA_RX,
1002
1003 NL80211_ATTR_MCAST_RATE,
1004
976 /* add attributes here, update the policy in nl80211.c */ 1005 /* add attributes here, update the policy in nl80211.c */
977 1006
978 __NL80211_ATTR_AFTER_LAST, 1007 __NL80211_ATTR_AFTER_LAST,
@@ -1307,7 +1336,11 @@ enum nl80211_bitrate_attr {
 1307 * wireless core it thinks it knows the regulatory domain we should be in. 1336 * wireless core it thinks it knows the regulatory domain we should be in.
1308 * @NL80211_REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an 1337 * @NL80211_REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an
1309 * 802.11 country information element with regulatory information it 1338 * 802.11 country information element with regulatory information it
1310 * thinks we should consider. 1339 * thinks we should consider. cfg80211 only processes the country
1340 * code from the IE, and relies on the regulatory domain information
 1341 * structure passed by userspace (CRDA) from our wireless-regdb.
1342 * If a channel is enabled but the country code indicates it should
1343 * be disabled we disable the channel and re-enable it upon disassociation.
1311 */ 1344 */
1312enum nl80211_reg_initiator { 1345enum nl80211_reg_initiator {
1313 NL80211_REGDOM_SET_BY_CORE, 1346 NL80211_REGDOM_SET_BY_CORE,
@@ -1786,6 +1819,8 @@ enum nl80211_ps_state {
1786 * the minimum amount the RSSI level must change after an event before a 1819 * the minimum amount the RSSI level must change after an event before a
1787 * new event may be issued (to reduce effects of RSSI oscillation). 1820 * new event may be issued (to reduce effects of RSSI oscillation).
1788 * @NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT: RSSI threshold event 1821 * @NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT: RSSI threshold event
1822 * @NL80211_ATTR_CQM_PKT_LOSS_EVENT: a u32 value indicating that this many
1823 * consecutive packets were not acknowledged by the peer
1789 * @__NL80211_ATTR_CQM_AFTER_LAST: internal 1824 * @__NL80211_ATTR_CQM_AFTER_LAST: internal
1790 * @NL80211_ATTR_CQM_MAX: highest key attribute 1825 * @NL80211_ATTR_CQM_MAX: highest key attribute
1791 */ 1826 */
@@ -1794,6 +1829,7 @@ enum nl80211_attr_cqm {
1794 NL80211_ATTR_CQM_RSSI_THOLD, 1829 NL80211_ATTR_CQM_RSSI_THOLD,
1795 NL80211_ATTR_CQM_RSSI_HYST, 1830 NL80211_ATTR_CQM_RSSI_HYST,
1796 NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, 1831 NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
1832 NL80211_ATTR_CQM_PKT_LOSS_EVENT,
1797 1833
1798 /* keep last */ 1834 /* keep last */
1799 __NL80211_ATTR_CQM_AFTER_LAST, 1835 __NL80211_ATTR_CQM_AFTER_LAST,
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 08c32e4f261..c6c608482cb 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -354,37 +354,6 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
354} 354}
355#endif /* RFKILL || RFKILL_MODULE */ 355#endif /* RFKILL || RFKILL_MODULE */
356 356
357
358#ifdef CONFIG_RFKILL_LEDS
359/**
360 * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED.
361 * This function might return a NULL pointer if registering of the
362 * LED trigger failed. Use this as "default_trigger" for the LED.
363 */
364const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
365
366/**
367 * rfkill_set_led_trigger_name -- set the LED trigger name
368 * @rfkill: rfkill struct
369 * @name: LED trigger name
370 *
371 * This function sets the LED trigger name of the radio LED
372 * trigger that rfkill creates. It is optional, but if called
373 * must be called before rfkill_register() to be effective.
374 */
375void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name);
376#else
377static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
378{
379 return NULL;
380}
381
382static inline void
383rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
384{
385}
386#endif
387
388#endif /* __KERNEL__ */ 357#endif /* __KERNEL__ */
389 358
390#endif /* RFKILL_H */ 359#endif /* RFKILL_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e6ba898de61..19f37a6ee6c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -386,9 +386,10 @@ struct sk_buff {
386#else 386#else
387 __u8 deliver_no_wcard:1; 387 __u8 deliver_no_wcard:1;
388#endif 388#endif
389 __u8 ooo_okay:1;
389 kmemcheck_bitfield_end(flags2); 390 kmemcheck_bitfield_end(flags2);
390 391
391 /* 0/14 bit hole */ 392 /* 0/13 bit hole */
392 393
393#ifdef CONFIG_NET_DMA 394#ifdef CONFIG_NET_DMA
394 dma_cookie_t dma_cookie; 395 dma_cookie_t dma_cookie;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index d66c61774d9..e1035291569 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -40,9 +40,9 @@ struct plat_stmmacenet_data {
40 int pmt; 40 int pmt;
41 void (*fix_mac_speed)(void *priv, unsigned int speed); 41 void (*fix_mac_speed)(void *priv, unsigned int speed);
42 void (*bus_setup)(void __iomem *ioaddr); 42 void (*bus_setup)(void __iomem *ioaddr);
43#ifdef CONFIG_STM_DRIVERS 43 int (*init)(struct platform_device *pdev);
44 struct stm_pad_config *pad_config; 44 void (*exit)(struct platform_device *pdev);
45#endif 45 void *custom_cfg;
46 void *bsp_priv; 46 void *bsp_priv;
47}; 47};
48 48
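
A hedged sketch of how a board file might wire the new init/exit callbacks; the names and the body comments are hypothetical placeholders, not code from this patch.

/* Hedged sketch: platform glue using the new callbacks. */
static int my_board_stmmac_init(struct platform_device *pdev)
{
	/* e.g. configure pads, enable clocks, release the PHY reset line */
	return 0;
}

static void my_board_stmmac_exit(struct platform_device *pdev)
{
	/* undo whatever init did */
}

static struct plat_stmmacenet_data my_board_stmmac_data = {
	.init = my_board_stmmac_init,
	.exit = my_board_stmmac_exit,
};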
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 7ae27a47381..44842c8d38c 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -97,6 +97,12 @@ struct driver_info {
97 97
98#define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */ 98#define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */
99 99
100/*
101 * Indicates to usbnet, that USB driver accumulates multiple IP packets.
102 * Affects statistic (counters) and short packet handling.
103 */
104#define FLAG_MULTI_PACKET 0x1000
105
100 /* init device ... can sleep, or cause probe() failure */ 106 /* init device ... can sleep, or cause probe() failure */
101 int (*bind)(struct usbnet *, struct usb_interface *); 107 int (*bind)(struct usbnet *, struct usb_interface *);
102 108
diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h
index 4f902e1908a..bebb8efea0a 100644
--- a/include/linux/wl12xx.h
+++ b/include/linux/wl12xx.h
@@ -24,6 +24,14 @@
24#ifndef _LINUX_WL12XX_H 24#ifndef _LINUX_WL12XX_H
25#define _LINUX_WL12XX_H 25#define _LINUX_WL12XX_H
26 26
27/* The board reference clock values */
28enum {
29 WL12XX_REFCLOCK_19 = 0, /* 19.2 MHz */
30 WL12XX_REFCLOCK_26 = 1, /* 26 MHz */
31 WL12XX_REFCLOCK_38 = 2, /* 38.4 MHz */
32 WL12XX_REFCLOCK_54 = 3, /* 54 MHz */
33};
34
27struct wl12xx_platform_data { 35struct wl12xx_platform_data {
28 void (*set_power)(bool enable); 36 void (*set_power)(bool enable);
29 /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */ 37 /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index a9441249306..23710aa6a18 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -1,8 +1,6 @@
1#ifndef _ADDRCONF_H 1#ifndef _ADDRCONF_H
2#define _ADDRCONF_H 2#define _ADDRCONF_H
3 3
4#define RETRANS_TIMER HZ
5
6#define MAX_RTR_SOLICITATIONS 3 4#define MAX_RTR_SOLICITATIONS 3
7#define RTR_SOLICITATION_INTERVAL (4*HZ) 5#define RTR_SOLICITATION_INTERVAL (4*HZ)
8 6
diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
index 9402543fc20..e54f6396fa4 100644
--- a/include/net/caif/cfctrl.h
+++ b/include/net/caif/cfctrl.h
@@ -51,7 +51,7 @@ struct cfctrl_rsp {
51 void (*restart_rsp)(void); 51 void (*restart_rsp)(void);
52 void (*radioset_rsp)(void); 52 void (*radioset_rsp)(void);
53 void (*reject_rsp)(struct cflayer *layer, u8 linkid, 53 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
54 struct cflayer *client_layer);; 54 struct cflayer *client_layer);
55}; 55};
56 56
57/* Link Setup Parameters for CAIF-Links. */ 57/* Link Setup Parameters for CAIF-Links. */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 97b8b7c9b63..0663945cfa4 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -923,6 +923,7 @@ struct cfg80211_disassoc_request {
923 * @privacy: this is a protected network, keys will be configured 923 * @privacy: this is a protected network, keys will be configured
924 * after joining 924 * after joining
925 * @basic_rates: bitmap of basic rates to use when creating the IBSS 925 * @basic_rates: bitmap of basic rates to use when creating the IBSS
926 * @mcast_rate: per-band multicast rate index + 1 (0: disabled)
926 */ 927 */
927struct cfg80211_ibss_params { 928struct cfg80211_ibss_params {
928 u8 *ssid; 929 u8 *ssid;
@@ -934,6 +935,7 @@ struct cfg80211_ibss_params {
934 u32 basic_rates; 935 u32 basic_rates;
935 bool channel_fixed; 936 bool channel_fixed;
936 bool privacy; 937 bool privacy;
938 int mcast_rate[IEEE80211_NUM_BANDS];
937}; 939};
938 940
939/** 941/**
@@ -1304,6 +1306,9 @@ struct cfg80211_ops {
1304 void (*mgmt_frame_register)(struct wiphy *wiphy, 1306 void (*mgmt_frame_register)(struct wiphy *wiphy,
1305 struct net_device *dev, 1307 struct net_device *dev,
1306 u16 frame_type, bool reg); 1308 u16 frame_type, bool reg);
1309
1310 int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant);
1311 int (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant);
1307}; 1312};
1308 1313
1309/* 1314/*
@@ -1321,13 +1326,14 @@ struct cfg80211_ops {
1321 * initiator is %REGDOM_SET_BY_CORE). 1326 * initiator is %REGDOM_SET_BY_CORE).
1322 * @WIPHY_FLAG_STRICT_REGULATORY: tells us the driver for this device will 1327 * @WIPHY_FLAG_STRICT_REGULATORY: tells us the driver for this device will
1323 * ignore regulatory domain settings until it gets its own regulatory 1328 * ignore regulatory domain settings until it gets its own regulatory
1324 * domain via its regulatory_hint(). After its gets its own regulatory 1329 * domain via its regulatory_hint() unless the regulatory hint is
 1325 * domain it will only allow further regulatory domain settings to 1330 * from a country IE. After it gets its own regulatory domain it will
1326 * further enhance compliance. For example if channel 13 and 14 are 1331 * only allow further regulatory domain settings to further enhance
1327 * disabled by this regulatory domain no user regulatory domain can 1332 * compliance. For example if channel 13 and 14 are disabled by this
1328 * enable these channels at a later time. This can be used for devices 1333 * regulatory domain no user regulatory domain can enable these channels
1329 * which do not have calibration information gauranteed for frequencies 1334 * at a later time. This can be used for devices which do not have
1330 * or settings outside of its regulatory domain. 1335 * calibration information guaranteed for frequencies or settings
1336 * outside of its regulatory domain.
1331 * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure 1337 * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
1332 * that passive scan flags and beaconing flags may not be lifted by 1338 * that passive scan flags and beaconing flags may not be lifted by
1333 * cfg80211 due to regulatory beacon hints. For more information on beacon 1339 * cfg80211 due to regulatory beacon hints. For more information on beacon
@@ -2595,6 +2601,18 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
2595 enum nl80211_cqm_rssi_threshold_event rssi_event, 2601 enum nl80211_cqm_rssi_threshold_event rssi_event,
2596 gfp_t gfp); 2602 gfp_t gfp);
2597 2603
2604/**
2605 * cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer
2606 * @dev: network device
2607 * @peer: peer's MAC address
2608 * @num_packets: how many packets were lost -- should be a fixed threshold
2609 * but probably no less than maybe 50, or maybe a throughput dependent
2610 * threshold (to account for temporary interference)
2611 * @gfp: context flags
2612 */
2613void cfg80211_cqm_pktloss_notify(struct net_device *dev,
2614 const u8 *peer, u32 num_packets, gfp_t gfp);
2615
2598/* Logging, debugging and troubleshooting/diagnostic helpers. */ 2616/* Logging, debugging and troubleshooting/diagnostic helpers. */
2599 2617
2600/* wiphy_printk helpers, similar to dev_printk */ 2618/* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index 0916bbf3bdf..b9e32db03f2 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -5,13 +5,14 @@
5struct dn_dev; 5struct dn_dev;
6 6
7struct dn_ifaddr { 7struct dn_ifaddr {
8 struct dn_ifaddr *ifa_next; 8 struct dn_ifaddr __rcu *ifa_next;
9 struct dn_dev *ifa_dev; 9 struct dn_dev *ifa_dev;
10 __le16 ifa_local; 10 __le16 ifa_local;
11 __le16 ifa_address; 11 __le16 ifa_address;
12 __u8 ifa_flags; 12 __u8 ifa_flags;
13 __u8 ifa_scope; 13 __u8 ifa_scope;
14 char ifa_label[IFNAMSIZ]; 14 char ifa_label[IFNAMSIZ];
15 struct rcu_head rcu;
15}; 16};
16 17
17#define DN_DEV_S_RU 0 /* Run - working normally */ 18#define DN_DEV_S_RU 0 /* Run - working normally */
@@ -83,7 +84,7 @@ struct dn_dev_parms {
83 84
84 85
85struct dn_dev { 86struct dn_dev {
86 struct dn_ifaddr *ifa_list; 87 struct dn_ifaddr __rcu *ifa_list;
87 struct net_device *dev; 88 struct net_device *dev;
88 struct dn_dev_parms parms; 89 struct dn_dev_parms parms;
89 char use_long; 90 char use_long;
@@ -171,19 +172,27 @@ extern int unregister_dnaddr_notifier(struct notifier_block *nb);
171 172
172static inline int dn_dev_islocal(struct net_device *dev, __le16 addr) 173static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
173{ 174{
174 struct dn_dev *dn_db = dev->dn_ptr; 175 struct dn_dev *dn_db;
175 struct dn_ifaddr *ifa; 176 struct dn_ifaddr *ifa;
177 int res = 0;
176 178
179 rcu_read_lock();
180 dn_db = rcu_dereference(dev->dn_ptr);
177 if (dn_db == NULL) { 181 if (dn_db == NULL) {
178 printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n"); 182 printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n");
179 return 0; 183 goto out;
180 } 184 }
181 185
182 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) 186 for (ifa = rcu_dereference(dn_db->ifa_list);
183 if ((addr ^ ifa->ifa_local) == 0) 187 ifa != NULL;
184 return 1; 188 ifa = rcu_dereference(ifa->ifa_next))
185 189 if ((addr ^ ifa->ifa_local) == 0) {
186 return 0; 190 res = 1;
191 break;
192 }
193out:
194 rcu_read_unlock();
195 return res;
187} 196}
188 197
189#endif /* _NET_DN_DEV_H */ 198#endif /* _NET_DN_DEV_H */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index ccadab3aa3f..9b185df265f 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -80,6 +80,16 @@ struct dn_route {
80 unsigned rt_type; 80 unsigned rt_type;
81}; 81};
82 82
83static inline bool dn_is_input_route(struct dn_route *rt)
84{
85 return rt->fl.iif != 0;
86}
87
88static inline bool dn_is_output_route(struct dn_route *rt)
89{
90 return rt->fl.iif == 0;
91}
92
83extern void dn_route_init(void); 93extern void dn_route_init(void);
84extern void dn_route_cleanup(void); 94extern void dn_route_cleanup(void);
85 95
diff --git a/include/net/dst.h b/include/net/dst.h
index ffe9cb719c0..a5bd72646d6 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -94,10 +94,10 @@ struct dst_entry {
94 int __use; 94 int __use;
95 unsigned long lastuse; 95 unsigned long lastuse;
96 union { 96 union {
97 struct dst_entry *next; 97 struct dst_entry *next;
98 struct rtable __rcu *rt_next; 98 struct rtable __rcu *rt_next;
99 struct rt6_info *rt6_next; 99 struct rt6_info *rt6_next;
100 struct dn_route *dn_next; 100 struct dn_route __rcu *dn_next;
101 }; 101 };
102}; 102};
103 103
diff --git a/include/net/flow.h b/include/net/flow.h
index 0ac3fb5e097..7196e6864b8 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -67,6 +67,7 @@ struct flowi {
67 } dnports; 67 } dnports;
68 68
69 __be32 spi; 69 __be32 spi;
70 __be32 gre_key;
70 71
71 struct { 72 struct {
72 __u8 type; 73 __u8 type;
@@ -78,6 +79,7 @@ struct flowi {
78#define fl_icmp_code uli_u.icmpt.code 79#define fl_icmp_code uli_u.icmpt.code
79#define fl_ipsec_spi uli_u.spi 80#define fl_ipsec_spi uli_u.spi
80#define fl_mh_type uli_u.mht.type 81#define fl_mh_type uli_u.mht.type
82#define fl_gre_key uli_u.gre_key
81 __u32 secid; /* used by xfrm; see secid.txt */ 83 __u32 secid; /* used by xfrm; see secid.txt */
82} __attribute__((__aligned__(BITS_PER_LONG/8))); 84} __attribute__((__aligned__(BITS_PER_LONG/8)));
83 85
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index f95ff8d9aa4..04977eefb0e 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -89,10 +89,11 @@ struct ip6_sf_socklist {
89struct ipv6_mc_socklist { 89struct ipv6_mc_socklist {
90 struct in6_addr addr; 90 struct in6_addr addr;
91 int ifindex; 91 int ifindex;
92 struct ipv6_mc_socklist *next; 92 struct ipv6_mc_socklist __rcu *next;
93 rwlock_t sflock; 93 rwlock_t sflock;
94 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ 94 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
95 struct ip6_sf_socklist *sflist; 95 struct ip6_sf_socklist *sflist;
96 struct rcu_head rcu;
96}; 97};
97 98
98struct ip6_sf_list { 99struct ip6_sf_list {
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index aae08f68663..ff013505236 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -25,6 +25,9 @@ struct sockaddr;
25extern int inet6_csk_bind_conflict(const struct sock *sk, 25extern int inet6_csk_bind_conflict(const struct sock *sk,
26 const struct inet_bind_bucket *tb); 26 const struct inet_bind_bucket *tb);
27 27
28extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
29 const struct request_sock *req);
30
28extern struct request_sock *inet6_csk_search_req(const struct sock *sk, 31extern struct request_sock *inet6_csk_search_req(const struct sock *sk,
29 struct request_sock ***prevp, 32 struct request_sock ***prevp,
30 const __be16 rport, 33 const __be16 rport,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index e4f494b42e0..6c93a56cc95 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -43,7 +43,7 @@ struct inet_connection_sock_af_ops {
43 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, 43 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
44 struct request_sock *req, 44 struct request_sock *req,
45 struct dst_entry *dst); 45 struct dst_entry *dst);
46 int (*remember_stamp)(struct sock *sk); 46 struct inet_peer *(*get_peer)(struct sock *sk, bool *release_it);
47 u16 net_header_len; 47 u16 net_header_len;
48 u16 sockaddr_len; 48 u16 sockaddr_len;
49 int (*setsockopt)(struct sock *sk, int level, int optname, 49 int (*setsockopt)(struct sock *sk, int level, int optname,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 1989cfd7405..8945f9fb192 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -141,7 +141,7 @@ struct inet_sock {
141 nodefrag:1; 141 nodefrag:1;
142 int mc_index; 142 int mc_index;
143 __be32 mc_addr; 143 __be32 mc_addr;
144 struct ip_mc_socklist *mc_list; 144 struct ip_mc_socklist __rcu *mc_list;
145 struct { 145 struct {
146 unsigned int flags; 146 unsigned int flags;
147 unsigned int fragsize; 147 unsigned int fragsize;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index fe239bfe5f7..599d96e7411 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -11,12 +11,21 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/jiffies.h> 12#include <linux/jiffies.h>
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <net/ipv6.h>
14#include <asm/atomic.h> 15#include <asm/atomic.h>
15 16
17struct inetpeer_addr {
18 union {
19 __be32 a4;
20 __be32 a6[4];
21 };
22 __u16 family;
23};
24
16struct inet_peer { 25struct inet_peer {
17 /* group together avl_left,avl_right,v4daddr to speedup lookups */ 26 /* group together avl_left,avl_right,v4daddr to speedup lookups */
18 struct inet_peer __rcu *avl_left, *avl_right; 27 struct inet_peer __rcu *avl_left, *avl_right;
19 __be32 v4daddr; /* peer's address */ 28 struct inetpeer_addr daddr;
20 __u32 avl_height; 29 __u32 avl_height;
21 struct list_head unused; 30 struct list_head unused;
22 __u32 dtime; /* the time of last use of not 31 __u32 dtime; /* the time of last use of not
@@ -26,7 +35,6 @@ struct inet_peer {
26 * Once inet_peer is queued for deletion (refcnt == -1), following fields 35 * Once inet_peer is queued for deletion (refcnt == -1), following fields
27 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp 36 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
28 * We can share memory with rcu_head to keep inet_peer small 37 * We can share memory with rcu_head to keep inet_peer small
29 * (less then 64 bytes)
30 */ 38 */
31 union { 39 union {
32 struct { 40 struct {
@@ -42,7 +50,25 @@ struct inet_peer {
42void inet_initpeers(void) __init; 50void inet_initpeers(void) __init;
43 51
44/* can be called with or without local BH being disabled */ 52/* can be called with or without local BH being disabled */
45struct inet_peer *inet_getpeer(__be32 daddr, int create); 53struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create);
54
55static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
56{
57 struct inetpeer_addr daddr;
58
59 daddr.a4 = v4daddr;
60 daddr.family = AF_INET;
61 return inet_getpeer(&daddr, create);
62}
63
64static inline struct inet_peer *inet_getpeer_v6(struct in6_addr *v6daddr, int create)
65{
66 struct inetpeer_addr daddr;
67
68 ipv6_addr_copy((struct in6_addr *)daddr.a6, v6daddr);
69 daddr.family = AF_INET6;
70 return inet_getpeer(&daddr, create);
71}
46 72
47/* can be called from BH context or outside */ 73/* can be called from BH context or outside */
48extern void inet_putpeer(struct inet_peer *p); 74extern void inet_putpeer(struct inet_peer *p);
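
A minimal hedged sketch of the lookup/release pairing with the new family-aware API (touch_peer() is a hypothetical caller):

/* Hedged sketch: find or create the peer entry for an IPv4 destination,
 * then drop the reference. */
static void touch_peer(__be32 daddr)
{
	struct inet_peer *peer = inet_getpeer_v4(daddr, 1);

	if (!peer)
		return;
	/* ... consult or update peer state here ... */
	inet_putpeer(peer);
}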
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 062a823d311..708ff7cb880 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -21,6 +21,7 @@
21#include <net/dst.h> 21#include <net/dst.h>
22#include <net/flow.h> 22#include <net/flow.h>
23#include <net/netlink.h> 23#include <net/netlink.h>
24#include <net/inetpeer.h>
24 25
25#ifdef CONFIG_IPV6_MULTIPLE_TABLES 26#ifdef CONFIG_IPV6_MULTIPLE_TABLES
26#define FIB6_TABLE_HASHSZ 256 27#define FIB6_TABLE_HASHSZ 256
@@ -109,6 +110,7 @@ struct rt6_info {
109 u32 rt6i_metric; 110 u32 rt6i_metric;
110 111
111 struct inet6_dev *rt6i_idev; 112 struct inet6_dev *rt6i_idev;
113 struct inet_peer *rt6i_peer;
112 114
113#ifdef CONFIG_XFRM 115#ifdef CONFIG_XFRM
114 u32 rt6i_flow_cache_genid; 116 u32 rt6i_flow_cache_genid;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 278312c95f9..e06e0ca1e91 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -3,7 +3,6 @@
3 3
4#define IP6_RT_PRIO_USER 1024 4#define IP6_RT_PRIO_USER 1024
5#define IP6_RT_PRIO_ADDRCONF 256 5#define IP6_RT_PRIO_ADDRCONF 256
6#define IP6_RT_PRIO_KERN 512
7 6
8struct route_info { 7struct route_info {
9 __u8 type; 8 __u8 type;
@@ -56,6 +55,18 @@ static inline unsigned int rt6_flags2srcprefs(int flags)
56 return (flags >> 3) & 7; 55 return (flags >> 3) & 7;
57} 56}
58 57
58extern void rt6_bind_peer(struct rt6_info *rt,
59 int create);
60
61static inline struct inet_peer *rt6_get_peer(struct rt6_info *rt)
62{
63 if (rt->rt6i_peer)
64 return rt->rt6i_peer;
65
66 rt6_bind_peer(rt, 0);
67 return rt->rt6i_peer;
68}
69
59extern void ip6_route_input(struct sk_buff *skb); 70extern void ip6_route_input(struct sk_buff *skb);
60 71
61extern struct dst_entry * ip6_route_output(struct net *net, 72extern struct dst_entry * ip6_route_output(struct net *net,
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 9fdf982d128..eaa4affd40c 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -97,6 +97,20 @@ enum ieee80211_max_queues {
97}; 97};
98 98
99/** 99/**
100 * enum ieee80211_ac_numbers - AC numbers as used in mac80211
101 * @IEEE80211_AC_VO: voice
102 * @IEEE80211_AC_VI: video
103 * @IEEE80211_AC_BE: best effort
104 * @IEEE80211_AC_BK: background
105 */
106enum ieee80211_ac_numbers {
107 IEEE80211_AC_VO = 0,
108 IEEE80211_AC_VI = 1,
109 IEEE80211_AC_BE = 2,
110 IEEE80211_AC_BK = 3,
111};
112
113/**
100 * struct ieee80211_tx_queue_params - transmit queue configuration 114 * struct ieee80211_tx_queue_params - transmit queue configuration
101 * 115 *
102 * The information provided in this structure is required for QoS 116 * The information provided in this structure is required for QoS
@@ -205,6 +219,7 @@ enum ieee80211_bss_change {
205 * @basic_rates: bitmap of basic rates, each bit stands for an 219 * @basic_rates: bitmap of basic rates, each bit stands for an
206 * index into the rate table configured by the driver in 220 * index into the rate table configured by the driver in
207 * the current band. 221 * the current band.
222 * @mcast_rate: per-band multicast rate index + 1 (0: disabled)
208 * @bssid: The BSSID for this BSS 223 * @bssid: The BSSID for this BSS
209 * @enable_beacon: whether beaconing should be enabled or not 224 * @enable_beacon: whether beaconing should be enabled or not
210 * @channel_type: Channel type for this BSS -- the hardware might be 225 * @channel_type: Channel type for this BSS -- the hardware might be
@@ -244,6 +259,7 @@ struct ieee80211_bss_conf {
244 u16 assoc_capability; 259 u16 assoc_capability;
245 u64 timestamp; 260 u64 timestamp;
246 u32 basic_rates; 261 u32 basic_rates;
262 int mcast_rate[IEEE80211_NUM_BANDS];
247 u16 ht_operation_mode; 263 u16 ht_operation_mode;
248 s32 cqm_rssi_thold; 264 s32 cqm_rssi_thold;
249 u32 cqm_rssi_hyst; 265 u32 cqm_rssi_hyst;
@@ -1652,6 +1668,11 @@ enum ieee80211_ampdu_mlme_action {
1652 * and IV16) for the given key from hardware. 1668 * and IV16) for the given key from hardware.
1653 * The callback must be atomic. 1669 * The callback must be atomic.
1654 * 1670 *
1671 * @set_frag_threshold: Configuration of fragmentation threshold. Assign this
1672 * if the device does fragmentation by itself; if this callback is
1673 * implemented then the stack will not do fragmentation.
1674 * The callback can sleep.
1675 *
1655 * @set_rts_threshold: Configuration of RTS threshold (if device needs it) 1676 * @set_rts_threshold: Configuration of RTS threshold (if device needs it)
1656 * The callback can sleep. 1677 * The callback can sleep.
1657 * 1678 *
@@ -1724,6 +1745,13 @@ enum ieee80211_ampdu_mlme_action {
1724 * completion of the channel switch. 1745 * completion of the channel switch.
1725 * 1746 *
1726 * @napi_poll: Poll Rx queue for incoming data frames. 1747 * @napi_poll: Poll Rx queue for incoming data frames.
1748 *
1749 * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
1750 * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
1751 * reject TX/RX mask combinations they cannot support by returning -EINVAL
1752 * (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
1753 *
1754 * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
1727 */ 1755 */
1728struct ieee80211_ops { 1756struct ieee80211_ops {
1729 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); 1757 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -1765,6 +1793,7 @@ struct ieee80211_ops {
1765 struct ieee80211_low_level_stats *stats); 1793 struct ieee80211_low_level_stats *stats);
1766 void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx, 1794 void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx,
1767 u32 *iv32, u16 *iv16); 1795 u32 *iv32, u16 *iv16);
1796 int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
1768 int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value); 1797 int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
1769 int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1798 int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1770 struct ieee80211_sta *sta); 1799 struct ieee80211_sta *sta);
@@ -1793,6 +1822,8 @@ struct ieee80211_ops {
1793 void (*channel_switch)(struct ieee80211_hw *hw, 1822 void (*channel_switch)(struct ieee80211_hw *hw,
1794 struct ieee80211_channel_switch *ch_switch); 1823 struct ieee80211_channel_switch *ch_switch);
1795 int (*napi_poll)(struct ieee80211_hw *hw, int budget); 1824 int (*napi_poll)(struct ieee80211_hw *hw, int budget);
1825 int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
1826 int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
1796}; 1827};
1797 1828
1798/** 1829/**
@@ -2501,6 +2532,21 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
2501 struct ieee80211_sta *pubsta, bool block); 2532 struct ieee80211_sta *pubsta, bool block);
2502 2533
2503/** 2534/**
2535 * ieee80211_ap_probereq_get - retrieve a Probe Request template
2536 * @hw: pointer obtained from ieee80211_alloc_hw().
2537 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2538 *
2539 * Creates a Probe Request template which can, for example, be uploaded to
2540 * hardware. The template is filled with bssid, ssid and supported rate
2541 * information. This function must only be called from within the
2542 * .bss_info_changed callback function and only in managed mode. The function
2543 * is only useful when the interface is associated, otherwise it will return
2544 * NULL.
2545 */
2546struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
2547 struct ieee80211_vif *vif);
2548
2549/**
2504 * ieee80211_beacon_loss - inform hardware does not receive beacons 2550 * ieee80211_beacon_loss - inform hardware does not receive beacons
2505 * 2551 *
2506 * @vif: &struct ieee80211_vif pointer from the add_interface callback. 2552 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -2640,7 +2686,7 @@ enum rate_control_changed {
2640 * @rate_idx_mask: user-requested rate mask (not MCS for now) 2686 * @rate_idx_mask: user-requested rate mask (not MCS for now)
2641 * @skb: the skb that will be transmitted, the control information in it needs 2687 * @skb: the skb that will be transmitted, the control information in it needs
2642 * to be filled in 2688 * to be filled in
2643 * @ap: whether this frame is sent out in AP mode 2689 * @bss: whether this frame is sent out in AP or IBSS mode
2644 */ 2690 */
2645struct ieee80211_tx_rate_control { 2691struct ieee80211_tx_rate_control {
2646 struct ieee80211_hw *hw; 2692 struct ieee80211_hw *hw;
@@ -2651,7 +2697,7 @@ struct ieee80211_tx_rate_control {
2651 bool rts, short_preamble; 2697 bool rts, short_preamble;
2652 u8 max_rate_idx; 2698 u8 max_rate_idx;
2653 u32 rate_idx_mask; 2699 u32 rate_idx_mask;
2654 bool ap; 2700 bool bss;
2655}; 2701};
2656 2702
2657struct rate_control_ops { 2703struct rate_control_ops {
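
A minimal sketch of how a driver might wire up the new antenna and fragmentation hooks added above. Everything named my_* (including struct my_priv) is hypothetical; only the callback signatures and the -EINVAL convention come from the mac80211.h hunks themselves, so treat this as an illustration rather than any particular driver's code.

    #include <net/mac80211.h>

    struct my_priv {
            u32 avail_ant;          /* bitmap of antennas the hardware actually has */
            u32 tx_ant, rx_ant;     /* currently configured masks */
    };

    static int my_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
    {
            struct my_priv *priv = hw->priv;

            /* reject mask combinations the hardware cannot support */
            if (!tx_ant || !rx_ant ||
                (tx_ant & ~priv->avail_ant) || (rx_ant & ~priv->avail_ant))
                    return -EINVAL;

            priv->tx_ant = tx_ant;
            priv->rx_ant = rx_ant;
            return 0;
    }

    static int my_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
    {
            struct my_priv *priv = hw->priv;

            *tx_ant = priv->tx_ant;
            *rx_ant = priv->rx_ant;
            return 0;
    }

    static const struct ieee80211_ops my_ops = {
            /* mandatory handlers (.tx, .start, .stop, ...) omitted for brevity */
            .set_antenna = my_set_antenna,
            .get_antenna = my_get_antenna,
    };
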
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 895997bc2ea..e0e594f8e9d 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -42,9 +42,6 @@ enum {
42#define ND_REACHABLE_TIME (30*HZ) 42#define ND_REACHABLE_TIME (30*HZ)
43#define ND_RETRANS_TIMER HZ 43#define ND_RETRANS_TIMER HZ
44 44
45#define ND_MIN_RANDOM_FACTOR (1/2)
46#define ND_MAX_RANDOM_FACTOR (3/2)
47
48#ifdef __KERNEL__ 45#ifdef __KERNEL__
49 46
50#include <linux/compiler.h> 47#include <linux/compiler.h>
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 6beb1ffc2b7..4014b623880 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -96,16 +96,16 @@ struct neighbour {
96 struct neigh_parms *parms; 96 struct neigh_parms *parms;
97 unsigned long confirmed; 97 unsigned long confirmed;
98 unsigned long updated; 98 unsigned long updated;
99 __u8 flags; 99 rwlock_t lock;
100 __u8 nud_state;
101 __u8 type;
102 __u8 dead;
103 atomic_t refcnt; 100 atomic_t refcnt;
104 struct sk_buff_head arp_queue; 101 struct sk_buff_head arp_queue;
105 struct timer_list timer; 102 struct timer_list timer;
106 unsigned long used; 103 unsigned long used;
107 atomic_t probes; 104 atomic_t probes;
108 rwlock_t lock; 105 __u8 flags;
106 __u8 nud_state;
107 __u8 type;
108 __u8 dead;
109 seqlock_t ha_lock; 109 seqlock_t ha_lock;
110 unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))]; 110 unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
111 struct hh_cache *hh; 111 struct hh_cache *hh;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 9801c55de5d..373f1a900cf 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -225,13 +225,15 @@ extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb,
225 u32 pid, unsigned int group, int report, 225 u32 pid, unsigned int group, int report,
226 gfp_t flags); 226 gfp_t flags);
227 227
228extern int nla_validate(struct nlattr *head, int len, int maxtype, 228extern int nla_validate(const struct nlattr *head,
229 int len, int maxtype,
229 const struct nla_policy *policy); 230 const struct nla_policy *policy);
230extern int nla_parse(struct nlattr *tb[], int maxtype, 231extern int nla_parse(struct nlattr **tb, int maxtype,
231 struct nlattr *head, int len, 232 const struct nlattr *head, int len,
232 const struct nla_policy *policy); 233 const struct nla_policy *policy);
233extern int nla_policy_len(const struct nla_policy *, int); 234extern int nla_policy_len(const struct nla_policy *, int);
234extern struct nlattr * nla_find(struct nlattr *head, int len, int attrtype); 235extern struct nlattr * nla_find(const struct nlattr *head,
236 int len, int attrtype);
235extern size_t nla_strlcpy(char *dst, const struct nlattr *nla, 237extern size_t nla_strlcpy(char *dst, const struct nlattr *nla,
236 size_t dstsize); 238 size_t dstsize);
237extern int nla_memcpy(void *dest, const struct nlattr *src, int count); 239extern int nla_memcpy(void *dest, const struct nlattr *src, int count);
@@ -346,7 +348,8 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
346 * Returns the next netlink message in the message stream and 348 * Returns the next netlink message in the message stream and
347 * decrements remaining by the size of the current message. 349 * decrements remaining by the size of the current message.
348 */ 350 */
349static inline struct nlmsghdr *nlmsg_next(struct nlmsghdr *nlh, int *remaining) 351static inline struct nlmsghdr *
352nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
350{ 353{
351 int totlen = NLMSG_ALIGN(nlh->nlmsg_len); 354 int totlen = NLMSG_ALIGN(nlh->nlmsg_len);
352 355
@@ -398,7 +401,8 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
398 * @maxtype: maximum attribute type to be expected 401 * @maxtype: maximum attribute type to be expected
399 * @policy: validation policy 402 * @policy: validation policy
400 */ 403 */
401static inline int nlmsg_validate(struct nlmsghdr *nlh, int hdrlen, int maxtype, 404static inline int nlmsg_validate(const struct nlmsghdr *nlh,
405 int hdrlen, int maxtype,
402 const struct nla_policy *policy) 406 const struct nla_policy *policy)
403{ 407{
404 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) 408 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
@@ -727,7 +731,8 @@ static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
727 * 731 *
728 * Returns the first attribute which matches the specified type. 732 * Returns the first attribute which matches the specified type.
729 */ 733 */
730static inline struct nlattr *nla_find_nested(struct nlattr *nla, int attrtype) 734static inline struct nlattr *
735nla_find_nested(const struct nlattr *nla, int attrtype)
731{ 736{
732 return nla_find(nla_data(nla), nla_len(nla), attrtype); 737 return nla_find(nla_data(nla), nla_len(nla), attrtype);
733} 738}
@@ -1032,7 +1037,7 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
1032 * 1037 *
1033 * Returns 0 on success or a negative error code. 1038 * Returns 0 on success or a negative error code.
1034 */ 1039 */
1035static inline int nla_validate_nested(struct nlattr *start, int maxtype, 1040static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
1036 const struct nla_policy *policy) 1041 const struct nla_policy *policy)
1037{ 1042{
1038 return nla_validate(nla_data(start), nla_len(start), maxtype, policy); 1043 return nla_validate(nla_data(start), nla_len(start), maxtype, policy);
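
With the const-ification above, the read side can keep its attribute stream const end to end. A minimal parse sketch against the new prototypes; the MY_ATTR_* enum, policy, and my_parse() are invented for illustration, only the nla_parse()/nla_policy shapes come from the header:

    #include <linux/kernel.h>
    #include <net/netlink.h>

    enum { MY_ATTR_UNSPEC, MY_ATTR_MTU, MY_ATTR_NAME, __MY_ATTR_MAX };
    #define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

    static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
            [MY_ATTR_MTU]   = { .type = NLA_U32 },
            [MY_ATTR_NAME]  = { .type = NLA_NUL_STRING, .len = 15 },
    };

    /* head/len would normally come from nlmsg_attrdata()/nlmsg_attrlen() */
    static int my_parse(const struct nlattr *head, int len)
    {
            struct nlattr *tb[MY_ATTR_MAX + 1];
            int err;

            err = nla_parse(tb, MY_ATTR_MAX, head, len, my_policy);
            if (err < 0)
                    return err;

            if (tb[MY_ATTR_MTU])
                    pr_info("mtu %u\n", nla_get_u32(tb[MY_ATTR_MTU]));
            return 0;
    }
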
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index 81a31c0db3e..3419bf5cd15 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -30,7 +30,7 @@ struct net_generic {
30 void *ptr[0]; 30 void *ptr[0];
31}; 31};
32 32
33static inline void *net_generic(struct net *net, int id) 33static inline void *net_generic(const struct net *net, int id)
34{ 34{
35 struct net_generic *ng; 35 struct net_generic *ng;
36 void *ptr; 36 void *ptr;
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 9e103a4e91e..356d6e3dc20 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -43,6 +43,12 @@ enum environment_cap {
43 * @intersect: indicates whether the wireless core should intersect 43 * @intersect: indicates whether the wireless core should intersect
44 * the requested regulatory domain with the presently set regulatory 44 * the requested regulatory domain with the presently set regulatory
45 * domain. 45 * domain.
 46 * @processed: indicates whether or not this request has already been
 47 * processed. When the last request is processed it means that the
 48 * regulatory domain currently set on cfg80211 has been updated from
 49 * CRDA and can be used by other regulatory requests. When the
 50 * last request is not yet processed we must yield until it
 51 * is processed before processing any new requests.
46 * @country_ie_checksum: checksum of the last processed and accepted 52 * @country_ie_checksum: checksum of the last processed and accepted
47 * country IE 53 * country IE
48 * @country_ie_env: lets us know if the AP is telling us we are outdoor, 54 * @country_ie_env: lets us know if the AP is telling us we are outdoor,
@@ -54,6 +60,7 @@ struct regulatory_request {
54 enum nl80211_reg_initiator initiator; 60 enum nl80211_reg_initiator initiator;
55 char alpha2[2]; 61 char alpha2[2];
56 bool intersect; 62 bool intersect;
63 bool processed;
57 enum environment_cap country_ie_env; 64 enum environment_cap country_ie_env;
58 struct list_head list; 65 struct list_head list;
59}; 66};
diff --git a/include/net/route.h b/include/net/route.h
index 7e5e73bfa4d..b8c1f7703fc 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -55,8 +55,6 @@ struct rtable {
55 /* Cache lookup keys */ 55 /* Cache lookup keys */
56 struct flowi fl; 56 struct flowi fl;
57 57
58 struct in_device *idev;
59
60 int rt_genid; 58 int rt_genid;
61 unsigned rt_flags; 59 unsigned rt_flags;
62 __u16 rt_type; 60 __u16 rt_type;
@@ -73,6 +71,16 @@ struct rtable {
73 struct inet_peer *peer; /* long-living peer info */ 71 struct inet_peer *peer; /* long-living peer info */
74}; 72};
75 73
74static inline bool rt_is_input_route(struct rtable *rt)
75{
76 return rt->fl.iif != 0;
77}
78
79static inline bool rt_is_output_route(struct rtable *rt)
80{
81 return rt->fl.iif == 0;
82}
83
76struct ip_rt_acct { 84struct ip_rt_acct {
77 __u32 o_bytes; 85 __u32 o_bytes;
78 __u32 o_packets; 86 __u32 o_packets;
@@ -161,14 +169,12 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
161{ 169{
162 struct flowi fl = { .oif = oif, 170 struct flowi fl = { .oif = oif,
163 .mark = sk->sk_mark, 171 .mark = sk->sk_mark,
164 .nl_u = { .ip4_u = { .daddr = dst, 172 .fl4_dst = dst,
165 .saddr = src, 173 .fl4_src = src,
166 .tos = tos } }, 174 .fl4_tos = tos,
167 .proto = protocol, 175 .proto = protocol,
168 .uli_u = { .ports = 176 .fl_ip_sport = sport,
169 { .sport = sport, 177 .fl_ip_dport = dport };
170 .dport = dport } } };
171
172 int err; 178 int err;
173 struct net *net = sock_net(sk); 179 struct net *net = sock_net(sk);
174 180
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index e013c68bfb0..4093ca78cf6 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -83,6 +83,41 @@ extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
83extern int rtnl_link_register(struct rtnl_link_ops *ops); 83extern int rtnl_link_register(struct rtnl_link_ops *ops);
84extern void rtnl_link_unregister(struct rtnl_link_ops *ops); 84extern void rtnl_link_unregister(struct rtnl_link_ops *ops);
85 85
86/**
87 * struct rtnl_af_ops - rtnetlink address family operations
88 *
89 * @list: Used internally
90 * @family: Address family
91 * @fill_link_af: Function to fill IFLA_AF_SPEC with address family
92 * specific netlink attributes.
93 * @get_link_af_size: Function to calculate size of address family specific
 94 * netlink attributes, exclusive of the container attribute.
 95 * @validate_link_af: Validate an IFLA_AF_SPEC attribute, must check attr
96 * for invalid configuration settings.
97 * @set_link_af: Function to parse a IFLA_AF_SPEC attribute and modify
98 * net_device accordingly.
99 */
100struct rtnl_af_ops {
101 struct list_head list;
102 int family;
103
104 int (*fill_link_af)(struct sk_buff *skb,
105 const struct net_device *dev);
106 size_t (*get_link_af_size)(const struct net_device *dev);
107
108 int (*validate_link_af)(const struct net_device *dev,
109 const struct nlattr *attr);
110 int (*set_link_af)(struct net_device *dev,
111 const struct nlattr *attr);
112};
113
114extern int __rtnl_af_register(struct rtnl_af_ops *ops);
115extern void __rtnl_af_unregister(struct rtnl_af_ops *ops);
116
117extern int rtnl_af_register(struct rtnl_af_ops *ops);
118extern void rtnl_af_unregister(struct rtnl_af_ops *ops);
119
120
86extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]); 121extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
87extern struct net_device *rtnl_create_link(struct net *src_net, struct net *net, 122extern struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
88 char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]); 123 char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]);
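
The new per-address-family hooks plug in much like rtnl_link_ops. Below is a sketch of a minimal registration; MY_AF_ATTR_MTU and the my_* names are made up for the example (per-family attribute numbering inside IFLA_AF_SPEC is protocol-specific), and only struct rtnl_af_ops plus the register/unregister calls come from the header above.

    #include <linux/socket.h>
    #include <linux/netdevice.h>
    #include <net/rtnetlink.h>

    #define MY_AF_ATTR_MTU 1        /* hypothetical per-family attribute */

    static size_t my_get_link_af_size(const struct net_device *dev)
    {
            return nla_total_size(sizeof(u32));     /* MY_AF_ATTR_MTU */
    }

    static int my_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
    {
            if (nla_put_u32(skb, MY_AF_ATTR_MTU, dev->mtu))
                    return -EMSGSIZE;
            return 0;
    }

    static struct rtnl_af_ops my_af_ops = {
            .family           = AF_INET6,   /* illustrative; use the protocol's own family */
            .fill_link_af     = my_fill_link_af,
            .get_link_af_size = my_get_link_af_size,
    };

    /* rtnl_af_register(&my_af_ops) from module init,
     * rtnl_af_unregister(&my_af_ops) on exit. */
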
diff --git a/include/net/scm.h b/include/net/scm.h
index 31656506d96..745460fa2f0 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -10,11 +10,12 @@
10/* Well, we should have at least one descriptor open 10/* Well, we should have at least one descriptor open
11 * to accept passed FDs 8) 11 * to accept passed FDs 8)
12 */ 12 */
13#define SCM_MAX_FD 255 13#define SCM_MAX_FD 253
14 14
15struct scm_fp_list { 15struct scm_fp_list {
16 struct list_head list; 16 struct list_head list;
17 int count; 17 short count;
18 short max;
18 struct file *fp[SCM_MAX_FD]; 19 struct file *fp[SCM_MAX_FD];
19}; 20};
20 21
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 2c55a7ea20a..c01dc99def0 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -111,9 +111,6 @@ typedef enum {
111 SCTP_CMD_LAST 111 SCTP_CMD_LAST
112} sctp_verb_t; 112} sctp_verb_t;
113 113
114#define SCTP_CMD_MAX (SCTP_CMD_LAST - 1)
115#define SCTP_CMD_NUM_VERBS (SCTP_CMD_MAX + 1)
116
117/* How many commands can you put in an sctp_cmd_seq_t? 114/* How many commands can you put in an sctp_cmd_seq_t?
118 * This is a rather arbitrary number, ideally derived from a careful 115 * This is a rather arbitrary number, ideally derived from a careful
119 * analysis of the state functions, but in reality just taken from 116 * analysis of the state functions, but in reality just taken from
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 63908840eef..c70d8ccc55c 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -61,7 +61,6 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
61 * symbols. CIDs are dense through SCTP_CID_BASE_MAX. 61 * symbols. CIDs are dense through SCTP_CID_BASE_MAX.
62 */ 62 */
63#define SCTP_CID_BASE_MAX SCTP_CID_SHUTDOWN_COMPLETE 63#define SCTP_CID_BASE_MAX SCTP_CID_SHUTDOWN_COMPLETE
64#define SCTP_CID_MAX SCTP_CID_ASCONF_ACK
65 64
66#define SCTP_NUM_BASE_CHUNK_TYPES (SCTP_CID_BASE_MAX + 1) 65#define SCTP_NUM_BASE_CHUNK_TYPES (SCTP_CID_BASE_MAX + 1)
67 66
@@ -86,9 +85,6 @@ typedef enum {
86 85
87} sctp_event_t; 86} sctp_event_t;
88 87
89#define SCTP_EVENT_T_MAX SCTP_EVENT_T_PRIMITIVE
90#define SCTP_EVENT_T_NUM (SCTP_EVENT_T_MAX + 1)
91
92/* As a convenience for the state machine, we append SCTP_EVENT_* and 88/* As a convenience for the state machine, we append SCTP_EVENT_* and
93 * SCTP_ULP_* to the list of possible chunks. 89 * SCTP_ULP_* to the list of possible chunks.
94 */ 90 */
@@ -162,9 +158,6 @@ SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, sctp_event_primitive_t, primitive)
162 - (unsigned long)(c->chunk_hdr)\ 158 - (unsigned long)(c->chunk_hdr)\
163 - sizeof(sctp_data_chunk_t))) 159 - sizeof(sctp_data_chunk_t)))
164 160
165#define SCTP_MAX_ERROR_CAUSE SCTP_ERROR_NONEXIST_IP
166#define SCTP_NUM_ERROR_CAUSE 10
167
168/* Internal error codes */ 161/* Internal error codes */
169typedef enum { 162typedef enum {
170 163
@@ -266,7 +259,6 @@ enum { SCTP_ARBITRARY_COOKIE_ECHO_LEN = 200 };
266#define SCTP_TSN_MAP_INITIAL BITS_PER_LONG 259#define SCTP_TSN_MAP_INITIAL BITS_PER_LONG
267#define SCTP_TSN_MAP_INCREMENT SCTP_TSN_MAP_INITIAL 260#define SCTP_TSN_MAP_INCREMENT SCTP_TSN_MAP_INITIAL
268#define SCTP_TSN_MAP_SIZE 4096 261#define SCTP_TSN_MAP_SIZE 4096
269#define SCTP_TSN_MAX_GAP 65535
270 262
271/* We will not record more than this many duplicate TSNs between two 263/* We will not record more than this many duplicate TSNs between two
272 * SACKs. The minimum PMTU is 576. Remove all the headers and there 264 * SACKs. The minimum PMTU is 576. Remove all the headers and there
@@ -301,9 +293,6 @@ enum { SCTP_MAX_GABS = 16 };
301 293
302#define SCTP_CLOCK_GRANULARITY 1 /* 1 jiffy */ 294#define SCTP_CLOCK_GRANULARITY 1 /* 1 jiffy */
303 295
304#define SCTP_DEF_MAX_INIT 6
305#define SCTP_DEF_MAX_SEND 10
306
307#define SCTP_DEFAULT_COOKIE_LIFE (60 * 1000) /* 60 seconds */ 296#define SCTP_DEFAULT_COOKIE_LIFE (60 * 1000) /* 60 seconds */
308 297
309#define SCTP_DEFAULT_MINWINDOW 1500 /* default minimum rwnd size */ 298#define SCTP_DEFAULT_MINWINDOW 1500 /* default minimum rwnd size */
@@ -317,9 +306,6 @@ enum { SCTP_MAX_GABS = 16 };
317 */ 306 */
318#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */ 307#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */
319#define SCTP_HOW_MANY_SECRETS 2 /* How many secrets I keep */ 308#define SCTP_HOW_MANY_SECRETS 2 /* How many secrets I keep */
320#define SCTP_HOW_LONG_COOKIE_LIVE 3600 /* How many seconds the current
321 * secret will live?
322 */
323#define SCTP_SECRET_SIZE 32 /* Number of octets in a 256 bits. */ 309#define SCTP_SECRET_SIZE 32 /* Number of octets in a 256 bits. */
324 310
325#define SCTP_SIGNATURE_SIZE 20 /* size of a SHA-1 signature */ 311#define SCTP_SIGNATURE_SIZE 20 /* size of a SHA-1 signature */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 69fef4fb79c..cc9185ca8fd 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -261,8 +261,6 @@ extern struct sctp_globals {
261#define sctp_assoc_hashsize (sctp_globals.assoc_hashsize) 261#define sctp_assoc_hashsize (sctp_globals.assoc_hashsize)
262#define sctp_assoc_hashtable (sctp_globals.assoc_hashtable) 262#define sctp_assoc_hashtable (sctp_globals.assoc_hashtable)
263#define sctp_port_hashsize (sctp_globals.port_hashsize) 263#define sctp_port_hashsize (sctp_globals.port_hashsize)
264#define sctp_port_rover (sctp_globals.port_rover)
265#define sctp_port_alloc_lock (sctp_globals.port_alloc_lock)
266#define sctp_port_hashtable (sctp_globals.port_hashtable) 264#define sctp_port_hashtable (sctp_globals.port_hashtable)
267#define sctp_local_addr_list (sctp_globals.local_addr_list) 265#define sctp_local_addr_list (sctp_globals.local_addr_list)
268#define sctp_local_addr_lock (sctp_globals.addr_list_lock) 266#define sctp_local_addr_lock (sctp_globals.addr_list_lock)
diff --git a/include/net/snmp.h b/include/net/snmp.h
index a0e61806d48..762e2abce88 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -60,9 +60,7 @@ struct ipstats_mib {
60}; 60};
61 61
62/* ICMP */ 62/* ICMP */
63#define ICMP_MIB_DUMMY __ICMP_MIB_MAX 63#define ICMP_MIB_MAX __ICMP_MIB_MAX
64#define ICMP_MIB_MAX (__ICMP_MIB_MAX + 1)
65
66struct icmp_mib { 64struct icmp_mib {
67 unsigned long mibs[ICMP_MIB_MAX]; 65 unsigned long mibs[ICMP_MIB_MAX];
68}; 66};
diff --git a/include/net/sock.h b/include/net/sock.h
index 659d968d95c..3482004e5c2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -57,7 +57,7 @@
57#include <linux/rculist_nulls.h> 57#include <linux/rculist_nulls.h>
58#include <linux/poll.h> 58#include <linux/poll.h>
59 59
60#include <asm/atomic.h> 60#include <linux/atomic.h>
61#include <net/dst.h> 61#include <net/dst.h>
62#include <net/checksum.h> 62#include <net/checksum.h>
63 63
@@ -241,59 +241,67 @@ struct sock {
241#define sk_bind_node __sk_common.skc_bind_node 241#define sk_bind_node __sk_common.skc_bind_node
242#define sk_prot __sk_common.skc_prot 242#define sk_prot __sk_common.skc_prot
243#define sk_net __sk_common.skc_net 243#define sk_net __sk_common.skc_net
244 kmemcheck_bitfield_begin(flags);
245 unsigned int sk_shutdown : 2,
246 sk_no_check : 2,
247 sk_userlocks : 4,
248 sk_protocol : 8,
249 sk_type : 16;
250 kmemcheck_bitfield_end(flags);
251 int sk_rcvbuf;
252 socket_lock_t sk_lock; 244 socket_lock_t sk_lock;
245 struct sk_buff_head sk_receive_queue;
253 /* 246 /*
254 * The backlog queue is special, it is always used with 247 * The backlog queue is special, it is always used with
255 * the per-socket spinlock held and requires low latency 248 * the per-socket spinlock held and requires low latency
256 * access. Therefore we special case its implementation. 249 * access. Therefore we special case its implementation.
250 * Note : rmem_alloc is in this structure to fill a hole
251 * on 64bit arches, not because its logically part of
252 * backlog.
257 */ 253 */
258 struct { 254 struct {
259 struct sk_buff *head; 255 atomic_t rmem_alloc;
260 struct sk_buff *tail; 256 int len;
261 int len; 257 struct sk_buff *head;
258 struct sk_buff *tail;
262 } sk_backlog; 259 } sk_backlog;
260#define sk_rmem_alloc sk_backlog.rmem_alloc
261 int sk_forward_alloc;
262#ifdef CONFIG_RPS
263 __u32 sk_rxhash;
264#endif
265 atomic_t sk_drops;
266 int sk_rcvbuf;
267
268 struct sk_filter __rcu *sk_filter;
263 struct socket_wq *sk_wq; 269 struct socket_wq *sk_wq;
264 struct dst_entry *sk_dst_cache; 270
271#ifdef CONFIG_NET_DMA
272 struct sk_buff_head sk_async_wait_queue;
273#endif
274
265#ifdef CONFIG_XFRM 275#ifdef CONFIG_XFRM
266 struct xfrm_policy *sk_policy[2]; 276 struct xfrm_policy *sk_policy[2];
267#endif 277#endif
278 unsigned long sk_flags;
279 struct dst_entry *sk_dst_cache;
268 spinlock_t sk_dst_lock; 280 spinlock_t sk_dst_lock;
269 atomic_t sk_rmem_alloc;
270 atomic_t sk_wmem_alloc; 281 atomic_t sk_wmem_alloc;
271 atomic_t sk_omem_alloc; 282 atomic_t sk_omem_alloc;
272 int sk_sndbuf; 283 int sk_sndbuf;
273 struct sk_buff_head sk_receive_queue;
274 struct sk_buff_head sk_write_queue; 284 struct sk_buff_head sk_write_queue;
275#ifdef CONFIG_NET_DMA 285 kmemcheck_bitfield_begin(flags);
276 struct sk_buff_head sk_async_wait_queue; 286 unsigned int sk_shutdown : 2,
277#endif 287 sk_no_check : 2,
288 sk_userlocks : 4,
289 sk_protocol : 8,
290 sk_type : 16;
291 kmemcheck_bitfield_end(flags);
278 int sk_wmem_queued; 292 int sk_wmem_queued;
279 int sk_forward_alloc;
280 gfp_t sk_allocation; 293 gfp_t sk_allocation;
281 int sk_route_caps; 294 int sk_route_caps;
282 int sk_route_nocaps; 295 int sk_route_nocaps;
283 int sk_gso_type; 296 int sk_gso_type;
284 unsigned int sk_gso_max_size; 297 unsigned int sk_gso_max_size;
285 int sk_rcvlowat; 298 int sk_rcvlowat;
286#ifdef CONFIG_RPS
287 __u32 sk_rxhash;
288#endif
289 unsigned long sk_flags;
290 unsigned long sk_lingertime; 299 unsigned long sk_lingertime;
291 struct sk_buff_head sk_error_queue; 300 struct sk_buff_head sk_error_queue;
292 struct proto *sk_prot_creator; 301 struct proto *sk_prot_creator;
293 rwlock_t sk_callback_lock; 302 rwlock_t sk_callback_lock;
294 int sk_err, 303 int sk_err,
295 sk_err_soft; 304 sk_err_soft;
296 atomic_t sk_drops;
297 unsigned short sk_ack_backlog; 305 unsigned short sk_ack_backlog;
298 unsigned short sk_max_ack_backlog; 306 unsigned short sk_max_ack_backlog;
299 __u32 sk_priority; 307 __u32 sk_priority;
@@ -301,7 +309,6 @@ struct sock {
301 const struct cred *sk_peer_cred; 309 const struct cred *sk_peer_cred;
302 long sk_rcvtimeo; 310 long sk_rcvtimeo;
303 long sk_sndtimeo; 311 long sk_sndtimeo;
304 struct sk_filter __rcu *sk_filter;
305 void *sk_protinfo; 312 void *sk_protinfo;
306 struct timer_list sk_timer; 313 struct timer_list sk_timer;
307 ktime_t sk_stamp; 314 ktime_t sk_stamp;
@@ -509,9 +516,6 @@ static __inline__ void sk_add_bind_node(struct sock *sk,
509#define sk_nulls_for_each_from(__sk, node) \ 516#define sk_nulls_for_each_from(__sk, node) \
510 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \ 517 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
511 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node) 518 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
512#define sk_for_each_continue(__sk, node) \
513 if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
514 hlist_for_each_entry_continue(__sk, node, sk_node)
515#define sk_for_each_safe(__sk, node, tmp, list) \ 519#define sk_for_each_safe(__sk, node, tmp, list) \
516 hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node) 520 hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
517#define sk_for_each_bound(__sk, node, list) \ 521#define sk_for_each_bound(__sk, node, list) \
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e36c874c7fb..3f227baee4b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -100,12 +100,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
100#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a 100#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
101 * connection: ~180sec is RFC minimum */ 101 * connection: ~180sec is RFC minimum */
102 102
103
104#define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned
105 * socket. 7 is ~50sec-16min.
106 */
107
108
109#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT 103#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
110 * state, about 60 seconds */ 104 * state, about 60 seconds */
111#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN 105#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
@@ -312,7 +306,8 @@ extern void tcp_shutdown (struct sock *sk, int how);
312 306
313extern int tcp_v4_rcv(struct sk_buff *skb); 307extern int tcp_v4_rcv(struct sk_buff *skb);
314 308
315extern int tcp_v4_remember_stamp(struct sock *sk); 309extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
310extern void *tcp_v4_tw_get_peer(struct sock *sk);
316extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); 311extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
317extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 312extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
318 size_t size); 313 size_t size);
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 97c3b14da55..053b3cf2c66 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -21,6 +21,7 @@ struct timewait_sock_ops {
21 int (*twsk_unique)(struct sock *sk, 21 int (*twsk_unique)(struct sock *sk,
22 struct sock *sktw, void *twp); 22 struct sock *sktw, void *twp);
23 void (*twsk_destructor)(struct sock *sk); 23 void (*twsk_destructor)(struct sock *sk);
24 void *(*twsk_getpeer)(struct sock *sk);
24}; 25};
25 26
26static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) 27static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -39,4 +40,11 @@ static inline void twsk_destructor(struct sock *sk)
39 sk->sk_prot->twsk_prot->twsk_destructor(sk); 40 sk->sk_prot->twsk_prot->twsk_destructor(sk);
40} 41}
41 42
43static inline void *twsk_getpeer(struct sock *sk)
44{
45 if (sk->sk_prot->twsk_prot->twsk_getpeer)
46 return sk->sk_prot->twsk_prot->twsk_getpeer(sk);
47 return NULL;
48}
49
42#endif /* _TIMEWAIT_SOCK_H */ 50#endif /* _TIMEWAIT_SOCK_H */
diff --git a/include/net/tipc/tipc.h b/include/net/tipc/tipc.h
deleted file mode 100644
index 1e0645e1eed..00000000000
--- a/include/net/tipc/tipc.h
+++ /dev/null
@@ -1,186 +0,0 @@
1/*
2 * include/net/tipc/tipc.h: Main include file for TIPC users
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005,2010 Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _NET_TIPC_H_
38#define _NET_TIPC_H_
39
40#ifdef __KERNEL__
41
42#include <linux/tipc.h>
43#include <linux/skbuff.h>
44
45/*
46 * Native API
47 */
48
49/*
50 * TIPC operating mode routines
51 */
52
53#define TIPC_NOT_RUNNING 0
54#define TIPC_NODE_MODE 1
55#define TIPC_NET_MODE 2
56
57typedef void (*tipc_mode_event)(void *usr_handle, int mode, u32 addr);
58
59int tipc_attach(unsigned int *userref, tipc_mode_event, void *usr_handle);
60
61void tipc_detach(unsigned int userref);
62
63/*
64 * TIPC port manipulation routines
65 */
66
67typedef void (*tipc_msg_err_event) (void *usr_handle,
68 u32 portref,
69 struct sk_buff **buf,
70 unsigned char const *data,
71 unsigned int size,
72 int reason,
73 struct tipc_portid const *attmpt_destid);
74
75typedef void (*tipc_named_msg_err_event) (void *usr_handle,
76 u32 portref,
77 struct sk_buff **buf,
78 unsigned char const *data,
79 unsigned int size,
80 int reason,
81 struct tipc_name_seq const *attmpt_dest);
82
83typedef void (*tipc_conn_shutdown_event) (void *usr_handle,
84 u32 portref,
85 struct sk_buff **buf,
86 unsigned char const *data,
87 unsigned int size,
88 int reason);
89
90typedef void (*tipc_msg_event) (void *usr_handle,
91 u32 portref,
92 struct sk_buff **buf,
93 unsigned char const *data,
94 unsigned int size,
95 unsigned int importance,
96 struct tipc_portid const *origin);
97
98typedef void (*tipc_named_msg_event) (void *usr_handle,
99 u32 portref,
100 struct sk_buff **buf,
101 unsigned char const *data,
102 unsigned int size,
103 unsigned int importance,
104 struct tipc_portid const *orig,
105 struct tipc_name_seq const *dest);
106
107typedef void (*tipc_conn_msg_event) (void *usr_handle,
108 u32 portref,
109 struct sk_buff **buf,
110 unsigned char const *data,
111 unsigned int size);
112
113typedef void (*tipc_continue_event) (void *usr_handle,
114 u32 portref);
115
116int tipc_createport(unsigned int tipc_user,
117 void *usr_handle,
118 unsigned int importance,
119 tipc_msg_err_event error_cb,
120 tipc_named_msg_err_event named_error_cb,
121 tipc_conn_shutdown_event conn_error_cb,
122 tipc_msg_event message_cb,
123 tipc_named_msg_event named_message_cb,
124 tipc_conn_msg_event conn_message_cb,
125 tipc_continue_event continue_event_cb,
126 u32 *portref);
127
128int tipc_deleteport(u32 portref);
129
130int tipc_ownidentity(u32 portref, struct tipc_portid *port);
131
132int tipc_portimportance(u32 portref, unsigned int *importance);
133int tipc_set_portimportance(u32 portref, unsigned int importance);
134
135int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
136int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
137
138int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
139int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
140
141int tipc_publish(u32 portref, unsigned int scope,
142 struct tipc_name_seq const *name_seq);
143int tipc_withdraw(u32 portref, unsigned int scope,
144 struct tipc_name_seq const *name_seq);
145
146int tipc_connect2port(u32 portref, struct tipc_portid const *port);
147
148int tipc_disconnect(u32 portref);
149
150int tipc_shutdown(u32 ref);
151
152/*
153 * TIPC messaging routines
154 */
155
156#define TIPC_PORT_IMPORTANCE 100 /* send using current port setting */
157
158
159int tipc_send(u32 portref,
160 unsigned int num_sect,
161 struct iovec const *msg_sect);
162
163int tipc_send2name(u32 portref,
164 struct tipc_name const *name,
165 u32 domain,
166 unsigned int num_sect,
167 struct iovec const *msg_sect);
168
169int tipc_send2port(u32 portref,
170 struct tipc_portid const *dest,
171 unsigned int num_sect,
172 struct iovec const *msg_sect);
173
174int tipc_send_buf2port(u32 portref,
175 struct tipc_portid const *dest,
176 struct sk_buff *buf,
177 unsigned int dsz);
178
179int tipc_multicast(u32 portref,
180 struct tipc_name_seq const *seq,
181 u32 domain, /* currently unused */
182 unsigned int section_count,
183 struct iovec const *msg);
184#endif
185
186#endif
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h
deleted file mode 100644
index ee2f304e491..00000000000
--- a/include/net/tipc/tipc_bearer.h
+++ /dev/null
@@ -1,138 +0,0 @@
1/*
2 * include/net/tipc/tipc_bearer.h: Include file for privileged access to TIPC bearers
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _NET_TIPC_BEARER_H_
38#define _NET_TIPC_BEARER_H_
39
40#ifdef __KERNEL__
41
42#include <linux/tipc_config.h>
43#include <linux/skbuff.h>
44#include <linux/spinlock.h>
45
46/*
47 * Identifiers of supported TIPC media types
48 */
49
50#define TIPC_MEDIA_TYPE_ETH 1
51
52/*
53 * Destination address structure used by TIPC bearers when sending messages
54 *
55 * IMPORTANT: The fields of this structure MUST be stored using the specified
56 * byte order indicated below, as the structure is exchanged between nodes
57 * as part of a link setup process.
58 */
59
60struct tipc_media_addr {
61 __be32 type; /* bearer type (network byte order) */
62 union {
63 __u8 eth_addr[6]; /* 48 bit Ethernet addr (byte array) */
64#if 0
65 /* Prototypes for other possible bearer types */
66
67 struct {
68 __u16 sin_family;
69 __u16 sin_port;
70 struct {
71 __u32 s_addr;
72 } sin_addr;
73 char pad[4];
74 } addr_in; /* IP-based bearer */
75 __u16 sock_descr; /* generic socket bearer */
76#endif
77 } dev_addr;
78};
79
80/**
81 * struct tipc_bearer - TIPC bearer info available to privileged users
82 * @usr_handle: pointer to additional user-defined information about bearer
83 * @mtu: max packet size bearer can support
84 * @blocked: non-zero if bearer is blocked
85 * @lock: spinlock for controlling access to bearer
86 * @addr: media-specific address associated with bearer
87 * @name: bearer name (format = media:interface)
88 *
89 * Note: TIPC initializes "name" and "lock" fields; user is responsible for
90 * initialization all other fields when a bearer is enabled.
91 */
92
93struct tipc_bearer {
94 void *usr_handle;
95 u32 mtu;
96 int blocked;
97 spinlock_t lock;
98 struct tipc_media_addr addr;
99 char name[TIPC_MAX_BEARER_NAME];
100};
101
102/*
103 * TIPC routines available to supported media types
104 */
105
106int tipc_register_media(u32 media_type,
107 char *media_name,
108 int (*enable)(struct tipc_bearer *),
109 void (*disable)(struct tipc_bearer *),
110 int (*send_msg)(struct sk_buff *,
111 struct tipc_bearer *,
112 struct tipc_media_addr *),
113 char *(*addr2str)(struct tipc_media_addr *a,
114 char *str_buf,
115 int str_size),
116 struct tipc_media_addr *bcast_addr,
117 const u32 bearer_priority,
118 const u32 link_tolerance, /* [ms] */
119 const u32 send_window_limit);
120
121void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
122
123int tipc_block_bearer(const char *name);
124void tipc_continue(struct tipc_bearer *tb_ptr);
125
126int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
127int tipc_disable_bearer(const char *name);
128
129/*
130 * Routines made available to TIPC by supported media types
131 */
132
133int tipc_eth_media_start(void);
134void tipc_eth_media_stop(void);
135
136#endif
137
138#endif
diff --git a/include/net/tipc/tipc_msg.h b/include/net/tipc/tipc_msg.h
deleted file mode 100644
index ffe50b4e7b9..00000000000
--- a/include/net/tipc/tipc_msg.h
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * include/net/tipc/tipc_msg.h: Include file for privileged access to TIPC message headers
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _NET_TIPC_MSG_H_
38#define _NET_TIPC_MSG_H_
39
40#ifdef __KERNEL__
41
42struct tipc_msg {
43 __be32 hdr[15];
44};
45
46
47/*
48 TIPC user data message header format, version 2:
49
50
51 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
52 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
53 w0:|vers | user |hdr sz |n|d|s|-| message size |
54 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
55 w1:|mstyp| error |rer cnt|lsc|opt p| broadcast ack no |
56 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
57 w2:| link level ack no | broadcast/link level seq no |
58 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
59 w3:| previous node |
60 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
61 w4:| originating port |
62 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
63 w5:| destination port |
64 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
65 w6:| originating node |
66 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
67 w7:| destination node |
68 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
69 w8:| name type / transport sequence number |
70 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
71 w9:| name instance/multicast lower bound |
72 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
73 wA:| multicast upper bound |
74 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
75 / /
76 \ options \
77 / /
78 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
79
80*/
81
82#define TIPC_CONN_MSG 0
83#define TIPC_MCAST_MSG 1
84#define TIPC_NAMED_MSG 2
85#define TIPC_DIRECT_MSG 3
86
87
88static inline u32 msg_word(struct tipc_msg *m, u32 pos)
89{
90 return ntohl(m->hdr[pos]);
91}
92
93static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
94{
95 return (msg_word(m, w) >> pos) & mask;
96}
97
98static inline u32 msg_importance(struct tipc_msg *m)
99{
100 return msg_bits(m, 0, 25, 0xf);
101}
102
103static inline u32 msg_hdr_sz(struct tipc_msg *m)
104{
105 return msg_bits(m, 0, 21, 0xf) << 2;
106}
107
108static inline int msg_short(struct tipc_msg *m)
109{
110 return msg_hdr_sz(m) == 24;
111}
112
113static inline u32 msg_size(struct tipc_msg *m)
114{
115 return msg_bits(m, 0, 0, 0x1ffff);
116}
117
118static inline u32 msg_data_sz(struct tipc_msg *m)
119{
120 return msg_size(m) - msg_hdr_sz(m);
121}
122
123static inline unchar *msg_data(struct tipc_msg *m)
124{
125 return ((unchar *)m) + msg_hdr_sz(m);
126}
127
128static inline u32 msg_type(struct tipc_msg *m)
129{
130 return msg_bits(m, 1, 29, 0x7);
131}
132
133static inline u32 msg_named(struct tipc_msg *m)
134{
135 return msg_type(m) == TIPC_NAMED_MSG;
136}
137
138static inline u32 msg_mcast(struct tipc_msg *m)
139{
140 return msg_type(m) == TIPC_MCAST_MSG;
141}
142
143static inline u32 msg_connected(struct tipc_msg *m)
144{
145 return msg_type(m) == TIPC_CONN_MSG;
146}
147
148static inline u32 msg_errcode(struct tipc_msg *m)
149{
150 return msg_bits(m, 1, 25, 0xf);
151}
152
153static inline u32 msg_prevnode(struct tipc_msg *m)
154{
155 return msg_word(m, 3);
156}
157
158static inline u32 msg_origport(struct tipc_msg *m)
159{
160 return msg_word(m, 4);
161}
162
163static inline u32 msg_destport(struct tipc_msg *m)
164{
165 return msg_word(m, 5);
166}
167
168static inline u32 msg_mc_netid(struct tipc_msg *m)
169{
170 return msg_word(m, 5);
171}
172
173static inline u32 msg_orignode(struct tipc_msg *m)
174{
175 if (likely(msg_short(m)))
176 return msg_prevnode(m);
177 return msg_word(m, 6);
178}
179
180static inline u32 msg_destnode(struct tipc_msg *m)
181{
182 return msg_word(m, 7);
183}
184
185static inline u32 msg_nametype(struct tipc_msg *m)
186{
187 return msg_word(m, 8);
188}
189
190static inline u32 msg_nameinst(struct tipc_msg *m)
191{
192 return msg_word(m, 9);
193}
194
195static inline u32 msg_namelower(struct tipc_msg *m)
196{
197 return msg_nameinst(m);
198}
199
200static inline u32 msg_nameupper(struct tipc_msg *m)
201{
202 return msg_word(m, 10);
203}
204
205#endif
206
207#endif
diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h
deleted file mode 100644
index 1893aaf4942..00000000000
--- a/include/net/tipc/tipc_port.h
+++ /dev/null
@@ -1,101 +0,0 @@
1/*
2 * include/net/tipc/tipc_port.h: Include file for privileged access to TIPC ports
3 *
4 * Copyright (c) 1994-2007, Ericsson AB
5 * Copyright (c) 2005-2008, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _NET_TIPC_PORT_H_
38#define _NET_TIPC_PORT_H_
39
40#ifdef __KERNEL__
41
42#include <linux/tipc.h>
43#include <linux/skbuff.h>
44#include <net/tipc/tipc_msg.h>
45
46#define TIPC_FLOW_CONTROL_WIN 512
47
48/**
49 * struct tipc_port - native TIPC port info available to privileged users
50 * @usr_handle: pointer to additional user-defined information about port
51 * @lock: pointer to spinlock for controlling access to port
52 * @connected: non-zero if port is currently connected to a peer port
53 * @conn_type: TIPC type used when connection was established
54 * @conn_instance: TIPC instance used when connection was established
55 * @conn_unacked: number of unacknowledged messages received from peer port
56 * @published: non-zero if port has one or more associated names
57 * @congested: non-zero if cannot send because of link or port congestion
58 * @max_pkt: maximum packet size "hint" used when building messages sent by port
59 * @ref: unique reference to port in TIPC object registry
60 * @phdr: preformatted message header used when sending messages
61 */
62
63struct tipc_port {
64 void *usr_handle;
65 spinlock_t *lock;
66 int connected;
67 u32 conn_type;
68 u32 conn_instance;
69 u32 conn_unacked;
70 int published;
71 u32 congested;
72 u32 max_pkt;
73 u32 ref;
74 struct tipc_msg phdr;
75};
76
77
78struct tipc_port *tipc_createport_raw(void *usr_handle,
79 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
80 void (*wakeup)(struct tipc_port *),
81 const u32 importance);
82
83int tipc_reject_msg(struct sk_buff *buf, u32 err);
84
85int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
86
87void tipc_acknowledge(u32 port_ref,u32 ack);
88
89struct tipc_port *tipc_get_port(const u32 ref);
90
91/*
92 * The following routines require that the port be locked on entry
93 */
94
95int tipc_disconnect_port(struct tipc_port *tp_ptr);
96
97
98#endif
99
100#endif
101
diff --git a/include/net/x25.h b/include/net/x25.h
index 1479cb4a41f..a06119a0512 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -315,6 +315,8 @@ extern struct list_head x25_route_list;
315extern rwlock_t x25_route_list_lock; 315extern rwlock_t x25_route_list_lock;
316extern struct list_head x25_forward_list; 316extern struct list_head x25_forward_list;
317extern rwlock_t x25_forward_list_lock; 317extern rwlock_t x25_forward_list_lock;
318extern struct list_head x25_neigh_list;
319extern rwlock_t x25_neigh_list_lock;
318 320
319extern int x25_proc_init(void); 321extern int x25_proc_init(void);
320extern void x25_proc_exit(void); 322extern void x25_proc_exit(void);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index bcfb6b24b01..7fa5b005893 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -805,6 +805,9 @@ __be16 xfrm_flowi_sport(struct flowi *fl)
805 case IPPROTO_MH: 805 case IPPROTO_MH:
806 port = htons(fl->fl_mh_type); 806 port = htons(fl->fl_mh_type);
807 break; 807 break;
808 case IPPROTO_GRE:
809 port = htons(ntohl(fl->fl_gre_key) >> 16);
810 break;
808 default: 811 default:
809 port = 0; /*XXX*/ 812 port = 0; /*XXX*/
810 } 813 }
@@ -826,6 +829,9 @@ __be16 xfrm_flowi_dport(struct flowi *fl)
826 case IPPROTO_ICMPV6: 829 case IPPROTO_ICMPV6:
827 port = htons(fl->fl_icmp_code); 830 port = htons(fl->fl_icmp_code);
828 break; 831 break;
832 case IPPROTO_GRE:
833 port = htons(ntohl(fl->fl_gre_key) & 0xffff);
834 break;
829 default: 835 default:
830 port = 0; /*XXX*/ 836 port = 0; /*XXX*/
831 } 837 }
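
The two new IPPROTO_GRE cases reuse the existing port selectors for the 32-bit GRE key: the upper 16 bits act as the pseudo source port and the lower 16 bits as the pseudo destination port. A standalone userspace illustration of that split, with a made-up key value:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint32_t fl_gre_key = htonl(0x12345678);        /* key as carried in the flow, network order */
            uint16_t sport = htons(ntohl(fl_gre_key) >> 16);
            uint16_t dport = htons(ntohl(fl_gre_key) & 0xffff);

            /* prints "sport 0x1234 dport 0x5678" */
            printf("sport 0x%04x dport 0x%04x\n", ntohs(sport), ntohs(dport));
            return 0;
    }
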
diff --git a/lib/Kconfig b/lib/Kconfig
index fa9bf2c0619..3116aa631af 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -210,4 +210,7 @@ config GENERIC_ATOMIC64
210config LRU_CACHE 210config LRU_CACHE
211 tristate 211 tristate
212 212
213config AVERAGE
214 bool
215
213endmenu 216endmenu
diff --git a/lib/Makefile b/lib/Makefile
index e6a3763b821..76d3b851490 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -106,6 +106,8 @@ obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
106 106
107obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o 107obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
108 108
109obj-$(CONFIG_AVERAGE) += average.o
110
109hostprogs-y := gen_crc32table 111hostprogs-y := gen_crc32table
110clean-files := crc32table.h 112clean-files := crc32table.h
111 113
diff --git a/lib/average.c b/lib/average.c
new file mode 100644
index 00000000000..f1d1b4660c4
--- /dev/null
+++ b/lib/average.c
@@ -0,0 +1,57 @@
1/*
2 * lib/average.c
3 *
4 * This source code is licensed under the GNU General Public License,
5 * Version 2. See the file COPYING for more details.
6 */
7
8#include <linux/module.h>
9#include <linux/average.h>
10#include <linux/bug.h>
11
12/**
13 * DOC: Exponentially Weighted Moving Average (EWMA)
14 *
15 * These are generic functions for calculating Exponentially Weighted Moving
16 * Averages (EWMA). We keep a structure with the EWMA parameters and a scaled
17 * up internal representation of the average value to prevent rounding errors.
18 * The factor for scaling up and the exponential weight (or decay rate) have to
 19 * be specified through the init function. The structure should not be accessed
 20 * directly but only through the helper functions.
21 */
22
23/**
24 * ewma_init() - Initialize EWMA parameters
25 * @avg: Average structure
26 * @factor: Factor to use for the scaled up internal value. The maximum value
27 * of averages can be ULONG_MAX/(factor*weight).
28 * @weight: Exponential weight, or decay rate. This defines how fast the
29 * influence of older values decreases. Has to be bigger than 1.
30 *
31 * Initialize the EWMA parameters for a given struct ewma @avg.
32 */
33void ewma_init(struct ewma *avg, unsigned long factor, unsigned long weight)
34{
35 WARN_ON(weight <= 1 || factor == 0);
36 avg->internal = 0;
37 avg->weight = weight;
38 avg->factor = factor;
39}
40EXPORT_SYMBOL(ewma_init);
41
42/**
43 * ewma_add() - Exponentially weighted moving average (EWMA)
44 * @avg: Average structure
45 * @val: Current value
46 *
47 * Add a sample to the average.
48 */
49struct ewma *ewma_add(struct ewma *avg, unsigned long val)
50{
51 avg->internal = avg->internal ?
52 (((avg->internal * (avg->weight - 1)) +
53 (val * avg->factor)) / avg->weight) :
54 (val * avg->factor);
55 return avg;
56}
57EXPORT_SYMBOL(ewma_add);
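
A minimal usage sketch of the EWMA helpers. It assumes the companion declarations in linux/average.h (struct ewma, ewma_init, ewma_add, and an ewma_read() accessor that scales the internal value back down by the factor); the my_* names are hypothetical.

    #include <linux/kernel.h>
    #include <linux/average.h>

    static struct ewma my_rssi_avg;

    static void my_rssi_setup(void)
    {
            /* scale samples by 1024 internally, weight 8: each new sample
             * contributes 1/8 of the resulting average */
            ewma_init(&my_rssi_avg, 1024, 8);
    }

    static void my_rssi_sample(unsigned long rssi)
    {
            ewma_add(&my_rssi_avg, rssi);
            pr_debug("avg rssi %lu\n", ewma_read(&my_rssi_avg));
    }
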
diff --git a/lib/nlattr.c b/lib/nlattr.c
index c4706eb98d3..00e8a02681a 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -15,7 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <net/netlink.h> 16#include <net/netlink.h>
17 17
18static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = { 18static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
19 [NLA_U8] = sizeof(u8), 19 [NLA_U8] = sizeof(u8),
20 [NLA_U16] = sizeof(u16), 20 [NLA_U16] = sizeof(u16),
21 [NLA_U32] = sizeof(u32), 21 [NLA_U32] = sizeof(u32),
@@ -23,7 +23,7 @@ static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
23 [NLA_NESTED] = NLA_HDRLEN, 23 [NLA_NESTED] = NLA_HDRLEN,
24}; 24};
25 25
26static int validate_nla(struct nlattr *nla, int maxtype, 26static int validate_nla(const struct nlattr *nla, int maxtype,
27 const struct nla_policy *policy) 27 const struct nla_policy *policy)
28{ 28{
29 const struct nla_policy *pt; 29 const struct nla_policy *pt;
@@ -115,10 +115,10 @@ static int validate_nla(struct nlattr *nla, int maxtype,
115 * 115 *
116 * Returns 0 on success or a negative error code. 116 * Returns 0 on success or a negative error code.
117 */ 117 */
118int nla_validate(struct nlattr *head, int len, int maxtype, 118int nla_validate(const struct nlattr *head, int len, int maxtype,
119 const struct nla_policy *policy) 119 const struct nla_policy *policy)
120{ 120{
121 struct nlattr *nla; 121 const struct nlattr *nla;
122 int rem, err; 122 int rem, err;
123 123
124 nla_for_each_attr(nla, head, len, rem) { 124 nla_for_each_attr(nla, head, len, rem) {
@@ -173,10 +173,10 @@ nla_policy_len(const struct nla_policy *p, int n)
173 * 173 *
174 * Returns 0 on success or a negative error code. 174 * Returns 0 on success or a negative error code.
175 */ 175 */
176int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, 176int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
177 const struct nla_policy *policy) 177 int len, const struct nla_policy *policy)
178{ 178{
179 struct nlattr *nla; 179 const struct nlattr *nla;
180 int rem, err; 180 int rem, err;
181 181
182 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); 182 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
@@ -191,7 +191,7 @@ int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
191 goto errout; 191 goto errout;
192 } 192 }
193 193
194 tb[type] = nla; 194 tb[type] = (struct nlattr *)nla;
195 } 195 }
196 } 196 }
197 197
@@ -212,14 +212,14 @@ errout:
212 * 212 *
213 * Returns the first attribute in the stream matching the specified type. 213 * Returns the first attribute in the stream matching the specified type.
214 */ 214 */
215struct nlattr *nla_find(struct nlattr *head, int len, int attrtype) 215struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
216{ 216{
217 struct nlattr *nla; 217 const struct nlattr *nla;
218 int rem; 218 int rem;
219 219
220 nla_for_each_attr(nla, head, len, rem) 220 nla_for_each_attr(nla, head, len, rem)
221 if (nla_type(nla) == attrtype) 221 if (nla_type(nla) == attrtype)
222 return nla; 222 return (struct nlattr *)nla;
223 223
224 return NULL; 224 return NULL;
225} 225}
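The nlattr.c changes only constify the read-only side of the API, so existing callers keep working. As a hedged sketch of a typical call site (FOO_ATTR_* and foo_parse() are hypothetical names, not part of this patch), an attribute stream is validated against a policy and scattered into a table indexed by attribute type:

#include <linux/kernel.h>
#include <net/netlink.h>

/* Hypothetical attribute space, used only for illustration. */
enum {
	FOO_ATTR_UNSPEC,
	FOO_ATTR_PORT,		/* u16 */
	FOO_ATTR_MTU,		/* u32 */
	__FOO_ATTR_MAX
};
#define FOO_ATTR_MAX (__FOO_ATTR_MAX - 1)

static const struct nla_policy foo_policy[FOO_ATTR_MAX + 1] = {
	[FOO_ATTR_PORT] = { .type = NLA_U16 },
	[FOO_ATTR_MTU]  = { .type = NLA_U32 },
};

/* With this patch the attribute stream may be passed in as const. */
static int foo_parse(const struct nlattr *head, int len)
{
	struct nlattr *tb[FOO_ATTR_MAX + 1];
	int err;

	err = nla_parse(tb, FOO_ATTR_MAX, head, len, foo_policy);
	if (err < 0)
		return err;

	if (tb[FOO_ATTR_MTU])
		pr_info("mtu %u\n", nla_get_u32(tb[FOO_ATTR_MTU]));
	return 0;
}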
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 52077ca2207..6e64f7c6a2e 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -272,13 +272,11 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
272 snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id); 272 snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
273 } 273 }
274 274
275 new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name, 275 new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
276 vlan_setup, real_dev->num_tx_queues);
277 276
278 if (new_dev == NULL) 277 if (new_dev == NULL)
279 return -ENOBUFS; 278 return -ENOBUFS;
280 279
281 netif_copy_real_num_queues(new_dev, real_dev);
282 dev_net_set(new_dev, net); 280 dev_net_set(new_dev, net);
283 /* need 4 bytes for extra VLAN header info, 281 /* need 4 bytes for extra VLAN header info,
284 * hope the underlying device can handle it. 282 * hope the underlying device can handle it.
@@ -334,12 +332,15 @@ static void vlan_transfer_features(struct net_device *dev,
334 vlandev->features &= ~dev->vlan_features; 332 vlandev->features &= ~dev->vlan_features;
335 vlandev->features |= dev->features & dev->vlan_features; 333 vlandev->features |= dev->features & dev->vlan_features;
336 vlandev->gso_max_size = dev->gso_max_size; 334 vlandev->gso_max_size = dev->gso_max_size;
335
336 if (dev->features & NETIF_F_HW_VLAN_TX)
337 vlandev->hard_header_len = dev->hard_header_len;
338 else
339 vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
340
337#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 341#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
338 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; 342 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
339#endif 343#endif
340 vlandev->real_num_tx_queues = dev->real_num_tx_queues;
341 BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
342
343 if (old_features != vlandev->features) 344 if (old_features != vlandev->features)
344 netdev_features_change(vlandev); 345 netdev_features_change(vlandev);
345} 346}
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index db01b3181fd..5687c9b95f3 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -19,19 +19,25 @@ struct vlan_priority_tci_mapping {
19 19
20 20
21/** 21/**
22 * struct vlan_rx_stats - VLAN percpu rx stats 22 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
23 * @rx_packets: number of received packets 23 * @rx_packets: number of received packets
24 * @rx_bytes: number of received bytes 24 * @rx_bytes: number of received bytes
25 * @rx_multicast: number of received multicast packets 25 * @rx_multicast: number of received multicast packets
26 * @tx_packets: number of transmitted packets
27 * @tx_bytes: number of transmitted bytes
26 * @syncp: synchronization point for 64bit counters 28 * @syncp: synchronization point for 64bit counters
27 * @rx_errors: number of errors 29 * @rx_errors: number of rx errors
30 * @tx_dropped: number of tx drops
28 */ 31 */
29struct vlan_rx_stats { 32struct vlan_pcpu_stats {
30 u64 rx_packets; 33 u64 rx_packets;
31 u64 rx_bytes; 34 u64 rx_bytes;
32 u64 rx_multicast; 35 u64 rx_multicast;
36 u64 tx_packets;
37 u64 tx_bytes;
33 struct u64_stats_sync syncp; 38 struct u64_stats_sync syncp;
34 unsigned long rx_errors; 39 u32 rx_errors;
40 u32 tx_dropped;
35}; 41};
36 42
37/** 43/**
@@ -45,9 +51,7 @@ struct vlan_rx_stats {
45 * @real_dev: underlying netdevice 51 * @real_dev: underlying netdevice
46 * @real_dev_addr: address of underlying netdevice 52 * @real_dev_addr: address of underlying netdevice
47 * @dent: proc dir entry 53 * @dent: proc dir entry
48 * @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX 54 * @vlan_pcpu_stats: ptr to percpu rx stats
49 * @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX
50 * @vlan_rx_stats: ptr to percpu rx stats
51 */ 55 */
52struct vlan_dev_info { 56struct vlan_dev_info {
53 unsigned int nr_ingress_mappings; 57 unsigned int nr_ingress_mappings;
@@ -62,9 +66,7 @@ struct vlan_dev_info {
62 unsigned char real_dev_addr[ETH_ALEN]; 66 unsigned char real_dev_addr[ETH_ALEN];
63 67
64 struct proc_dir_entry *dent; 68 struct proc_dir_entry *dent;
65 unsigned long cnt_inc_headroom_on_tx; 69 struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
66 unsigned long cnt_encap_on_xmit;
67 struct vlan_rx_stats __percpu *vlan_rx_stats;
68}; 70};
69 71
70static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) 72static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 69b2f79800a..ce8e3ab3e7a 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -9,7 +9,7 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
9 struct sk_buff *skb = *skbp; 9 struct sk_buff *skb = *skbp;
10 u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK; 10 u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
11 struct net_device *vlan_dev; 11 struct net_device *vlan_dev;
12 struct vlan_rx_stats *rx_stats; 12 struct vlan_pcpu_stats *rx_stats;
13 13
14 vlan_dev = vlan_find_dev(skb->dev, vlan_id); 14 vlan_dev = vlan_find_dev(skb->dev, vlan_id);
15 if (!vlan_dev) { 15 if (!vlan_dev) {
@@ -26,7 +26,7 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
26 skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci); 26 skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
27 skb->vlan_tci = 0; 27 skb->vlan_tci = 0;
28 28
29 rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_rx_stats); 29 rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
30 30
31 u64_stats_update_begin(&rx_stats->syncp); 31 u64_stats_update_begin(&rx_stats->syncp);
32 rx_stats->rx_packets++; 32 rx_stats->rx_packets++;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 14e3d1fa07a..be737539f34 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -141,7 +141,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
141 struct packet_type *ptype, struct net_device *orig_dev) 141 struct packet_type *ptype, struct net_device *orig_dev)
142{ 142{
143 struct vlan_hdr *vhdr; 143 struct vlan_hdr *vhdr;
144 struct vlan_rx_stats *rx_stats; 144 struct vlan_pcpu_stats *rx_stats;
145 struct net_device *vlan_dev; 145 struct net_device *vlan_dev;
146 u16 vlan_id; 146 u16 vlan_id;
147 u16 vlan_tci; 147 u16 vlan_tci;
@@ -177,7 +177,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
177 } else { 177 } else {
178 skb->dev = vlan_dev; 178 skb->dev = vlan_dev;
179 179
180 rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats); 180 rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
181 181
182 u64_stats_update_begin(&rx_stats->syncp); 182 u64_stats_update_begin(&rx_stats->syncp);
183 rx_stats->rx_packets++; 183 rx_stats->rx_packets++;
@@ -274,9 +274,6 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
274 u16 vlan_tci = 0; 274 u16 vlan_tci = 0;
275 int rc; 275 int rc;
276 276
277 if (WARN_ON(skb_headroom(skb) < dev->hard_header_len))
278 return -ENOSPC;
279
280 if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) { 277 if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
281 vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN); 278 vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
282 279
@@ -313,8 +310,6 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
313static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, 310static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
314 struct net_device *dev) 311 struct net_device *dev)
315{ 312{
316 int i = skb_get_queue_mapping(skb);
317 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
318 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); 313 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
319 unsigned int len; 314 unsigned int len;
320 int ret; 315 int ret;
@@ -326,71 +321,31 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
326 */ 321 */
327 if (veth->h_vlan_proto != htons(ETH_P_8021Q) || 322 if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
328 vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) { 323 vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
329 unsigned int orig_headroom = skb_headroom(skb);
330 u16 vlan_tci; 324 u16 vlan_tci;
331
332 vlan_dev_info(dev)->cnt_encap_on_xmit++;
333
334 vlan_tci = vlan_dev_info(dev)->vlan_id; 325 vlan_tci = vlan_dev_info(dev)->vlan_id;
335 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); 326 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
336 skb = __vlan_put_tag(skb, vlan_tci); 327 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
337 if (!skb) {
338 txq->tx_dropped++;
339 return NETDEV_TX_OK;
340 }
341
342 if (orig_headroom < VLAN_HLEN)
343 vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
344 } 328 }
345 329
346
347 skb_set_dev(skb, vlan_dev_info(dev)->real_dev); 330 skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
348 len = skb->len; 331 len = skb->len;
349 ret = dev_queue_xmit(skb); 332 ret = dev_queue_xmit(skb);
350 333
351 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 334 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
352 txq->tx_packets++; 335 struct vlan_pcpu_stats *stats;
353 txq->tx_bytes += len;
354 } else
355 txq->tx_dropped++;
356 336
357 return ret; 337 stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
358} 338 u64_stats_update_begin(&stats->syncp);
359 339 stats->tx_packets++;
360static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, 340 stats->tx_bytes += len;
361 struct net_device *dev) 341 u64_stats_update_end(&stats->syncp);
362{ 342 } else {
363 int i = skb_get_queue_mapping(skb); 343 this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
364 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 344 }
365 u16 vlan_tci;
366 unsigned int len;
367 int ret;
368
369 vlan_tci = vlan_dev_info(dev)->vlan_id;
370 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
371 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
372
373 skb->dev = vlan_dev_info(dev)->real_dev;
374 len = skb->len;
375 ret = dev_queue_xmit(skb);
376
377 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
378 txq->tx_packets++;
379 txq->tx_bytes += len;
380 } else
381 txq->tx_dropped++;
382 345
383 return ret; 346 return ret;
384} 347}
385 348
386static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
387{
388 struct net_device *rdev = vlan_dev_info(dev)->real_dev;
389 const struct net_device_ops *ops = rdev->netdev_ops;
390
391 return ops->ndo_select_queue(rdev, skb);
392}
393
394static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) 349static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
395{ 350{
396 /* TODO: gotta make sure the underlying layer can handle it, 351 /* TODO: gotta make sure the underlying layer can handle it,
@@ -719,8 +674,7 @@ static const struct header_ops vlan_header_ops = {
719 .parse = eth_header_parse, 674 .parse = eth_header_parse,
720}; 675};
721 676
722static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops, 677static const struct net_device_ops vlan_netdev_ops;
723 vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
724 678
725static int vlan_dev_init(struct net_device *dev) 679static int vlan_dev_init(struct net_device *dev)
726{ 680{
@@ -738,6 +692,7 @@ static int vlan_dev_init(struct net_device *dev)
738 (1<<__LINK_STATE_PRESENT); 692 (1<<__LINK_STATE_PRESENT);
739 693
740 dev->features |= real_dev->features & real_dev->vlan_features; 694 dev->features |= real_dev->features & real_dev->vlan_features;
695 dev->features |= NETIF_F_LLTX;
741 dev->gso_max_size = real_dev->gso_max_size; 696 dev->gso_max_size = real_dev->gso_max_size;
742 697
743 /* ipv6 shared card related stuff */ 698 /* ipv6 shared card related stuff */
@@ -755,26 +710,20 @@ static int vlan_dev_init(struct net_device *dev)
755 if (real_dev->features & NETIF_F_HW_VLAN_TX) { 710 if (real_dev->features & NETIF_F_HW_VLAN_TX) {
756 dev->header_ops = real_dev->header_ops; 711 dev->header_ops = real_dev->header_ops;
757 dev->hard_header_len = real_dev->hard_header_len; 712 dev->hard_header_len = real_dev->hard_header_len;
758 if (real_dev->netdev_ops->ndo_select_queue)
759 dev->netdev_ops = &vlan_netdev_accel_ops_sq;
760 else
761 dev->netdev_ops = &vlan_netdev_accel_ops;
762 } else { 713 } else {
763 dev->header_ops = &vlan_header_ops; 714 dev->header_ops = &vlan_header_ops;
764 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; 715 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
765 if (real_dev->netdev_ops->ndo_select_queue)
766 dev->netdev_ops = &vlan_netdev_ops_sq;
767 else
768 dev->netdev_ops = &vlan_netdev_ops;
769 } 716 }
770 717
718 dev->netdev_ops = &vlan_netdev_ops;
719
771 if (is_vlan_dev(real_dev)) 720 if (is_vlan_dev(real_dev))
772 subclass = 1; 721 subclass = 1;
773 722
774 vlan_dev_set_lockdep_class(dev, subclass); 723 vlan_dev_set_lockdep_class(dev, subclass);
775 724
776 vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats); 725 vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
777 if (!vlan_dev_info(dev)->vlan_rx_stats) 726 if (!vlan_dev_info(dev)->vlan_pcpu_stats)
778 return -ENOMEM; 727 return -ENOMEM;
779 728
780 return 0; 729 return 0;
@@ -786,8 +735,8 @@ static void vlan_dev_uninit(struct net_device *dev)
786 struct vlan_dev_info *vlan = vlan_dev_info(dev); 735 struct vlan_dev_info *vlan = vlan_dev_info(dev);
787 int i; 736 int i;
788 737
789 free_percpu(vlan->vlan_rx_stats); 738 free_percpu(vlan->vlan_pcpu_stats);
790 vlan->vlan_rx_stats = NULL; 739 vlan->vlan_pcpu_stats = NULL;
791 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { 740 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
792 while ((pm = vlan->egress_priority_map[i]) != NULL) { 741 while ((pm = vlan->egress_priority_map[i]) != NULL) {
793 vlan->egress_priority_map[i] = pm->next; 742 vlan->egress_priority_map[i] = pm->next;
@@ -825,33 +774,37 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
825 774
826static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 775static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
827{ 776{
828 dev_txq_stats_fold(dev, stats);
829 777
830 if (vlan_dev_info(dev)->vlan_rx_stats) { 778 if (vlan_dev_info(dev)->vlan_pcpu_stats) {
831 struct vlan_rx_stats *p, accum = {0}; 779 struct vlan_pcpu_stats *p;
780 u32 rx_errors = 0, tx_dropped = 0;
832 int i; 781 int i;
833 782
834 for_each_possible_cpu(i) { 783 for_each_possible_cpu(i) {
835 u64 rxpackets, rxbytes, rxmulticast; 784 u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
836 unsigned int start; 785 unsigned int start;
837 786
838 p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i); 787 p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
839 do { 788 do {
840 start = u64_stats_fetch_begin_bh(&p->syncp); 789 start = u64_stats_fetch_begin_bh(&p->syncp);
841 rxpackets = p->rx_packets; 790 rxpackets = p->rx_packets;
842 rxbytes = p->rx_bytes; 791 rxbytes = p->rx_bytes;
843 rxmulticast = p->rx_multicast; 792 rxmulticast = p->rx_multicast;
793 txpackets = p->tx_packets;
794 txbytes = p->tx_bytes;
844 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 795 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
845 accum.rx_packets += rxpackets; 796
846 accum.rx_bytes += rxbytes; 797 stats->rx_packets += rxpackets;
847 accum.rx_multicast += rxmulticast; 798 stats->rx_bytes += rxbytes;
848 /* rx_errors is ulong, not protected by syncp */ 799 stats->multicast += rxmulticast;
849 accum.rx_errors += p->rx_errors; 800 stats->tx_packets += txpackets;
801 stats->tx_bytes += txbytes;
802 /* rx_errors & tx_dropped are u32 */
803 rx_errors += p->rx_errors;
804 tx_dropped += p->tx_dropped;
850 } 805 }
851 stats->rx_packets = accum.rx_packets; 806 stats->rx_errors = rx_errors;
852 stats->rx_bytes = accum.rx_bytes; 807 stats->tx_dropped = tx_dropped;
853 stats->rx_errors = accum.rx_errors;
854 stats->multicast = accum.rx_multicast;
855 } 808 }
856 return stats; 809 return stats;
857} 810}
@@ -908,80 +861,6 @@ static const struct net_device_ops vlan_netdev_ops = {
908#endif 861#endif
909}; 862};
910 863
911static const struct net_device_ops vlan_netdev_accel_ops = {
912 .ndo_change_mtu = vlan_dev_change_mtu,
913 .ndo_init = vlan_dev_init,
914 .ndo_uninit = vlan_dev_uninit,
915 .ndo_open = vlan_dev_open,
916 .ndo_stop = vlan_dev_stop,
917 .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
918 .ndo_validate_addr = eth_validate_addr,
919 .ndo_set_mac_address = vlan_dev_set_mac_address,
920 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
921 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
922 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
923 .ndo_do_ioctl = vlan_dev_ioctl,
924 .ndo_neigh_setup = vlan_dev_neigh_setup,
925 .ndo_get_stats64 = vlan_dev_get_stats64,
926#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
927 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
928 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
929 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
930 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
931 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
932#endif
933};
934
935static const struct net_device_ops vlan_netdev_ops_sq = {
936 .ndo_select_queue = vlan_dev_select_queue,
937 .ndo_change_mtu = vlan_dev_change_mtu,
938 .ndo_init = vlan_dev_init,
939 .ndo_uninit = vlan_dev_uninit,
940 .ndo_open = vlan_dev_open,
941 .ndo_stop = vlan_dev_stop,
942 .ndo_start_xmit = vlan_dev_hard_start_xmit,
943 .ndo_validate_addr = eth_validate_addr,
944 .ndo_set_mac_address = vlan_dev_set_mac_address,
945 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
946 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
947 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
948 .ndo_do_ioctl = vlan_dev_ioctl,
949 .ndo_neigh_setup = vlan_dev_neigh_setup,
950 .ndo_get_stats64 = vlan_dev_get_stats64,
951#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
952 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
953 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
954 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
955 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
956 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
957#endif
958};
959
960static const struct net_device_ops vlan_netdev_accel_ops_sq = {
961 .ndo_select_queue = vlan_dev_select_queue,
962 .ndo_change_mtu = vlan_dev_change_mtu,
963 .ndo_init = vlan_dev_init,
964 .ndo_uninit = vlan_dev_uninit,
965 .ndo_open = vlan_dev_open,
966 .ndo_stop = vlan_dev_stop,
967 .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
968 .ndo_validate_addr = eth_validate_addr,
969 .ndo_set_mac_address = vlan_dev_set_mac_address,
970 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
971 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
972 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
973 .ndo_do_ioctl = vlan_dev_ioctl,
974 .ndo_neigh_setup = vlan_dev_neigh_setup,
975 .ndo_get_stats64 = vlan_dev_get_stats64,
976#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
977 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
978 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
979 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
980 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
981 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
982#endif
983};
984
985void vlan_setup(struct net_device *dev) 864void vlan_setup(struct net_device *dev)
986{ 865{
987 ether_setup(dev); 866 ether_setup(dev);
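The vlan_dev.c hunks above drop the per-tx-queue counters in favour of a single per-CPU stats block guarded by u64_stats_sync: writers bump local counters without locks, and the reader folds all CPUs, retrying whenever a 32-bit writer raced with it. A condensed, hedged sketch of that pattern, with illustrative my_* names rather than the VLAN code itself:

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/percpu.h>

struct my_pcpu_stats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

/* Writer: per-packet, lock-free; syncp only guards the 64-bit counters
 * on 32-bit hosts (it compiles away on 64-bit). */
static void my_count_tx(struct my_pcpu_stats __percpu *pcpu, unsigned int len)
{
	struct my_pcpu_stats *stats = this_cpu_ptr(pcpu);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/* Reader: fold every CPU's counters, retrying a CPU whose writer raced. */
static void my_fold_tx(struct my_pcpu_stats __percpu *pcpu,
		       struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct my_pcpu_stats *p = per_cpu_ptr(pcpu, i);
		unsigned int start;
		u64 packets, bytes;

		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			packets = p->tx_packets;
			bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}
}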
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index ddc105734af..be9a5c19a77 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -101,25 +101,6 @@ static int vlan_changelink(struct net_device *dev,
101 return 0; 101 return 0;
102} 102}
103 103
104static int vlan_get_tx_queues(struct net *net,
105 struct nlattr *tb[],
106 unsigned int *num_tx_queues,
107 unsigned int *real_num_tx_queues)
108{
109 struct net_device *real_dev;
110
111 if (!tb[IFLA_LINK])
112 return -EINVAL;
113
114 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
115 if (!real_dev)
116 return -ENODEV;
117
118 *num_tx_queues = real_dev->num_tx_queues;
119 *real_num_tx_queues = real_dev->real_num_tx_queues;
120 return 0;
121}
122
123static int vlan_newlink(struct net *src_net, struct net_device *dev, 104static int vlan_newlink(struct net *src_net, struct net_device *dev,
124 struct nlattr *tb[], struct nlattr *data[]) 105 struct nlattr *tb[], struct nlattr *data[])
125{ 106{
@@ -237,7 +218,6 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
237 .maxtype = IFLA_VLAN_MAX, 218 .maxtype = IFLA_VLAN_MAX,
238 .policy = vlan_policy, 219 .policy = vlan_policy,
239 .priv_size = sizeof(struct vlan_dev_info), 220 .priv_size = sizeof(struct vlan_dev_info),
240 .get_tx_queues = vlan_get_tx_queues,
241 .setup = vlan_setup, 221 .setup = vlan_setup,
242 .validate = vlan_validate, 222 .validate = vlan_validate,
243 .newlink = vlan_newlink, 223 .newlink = vlan_newlink,
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 80e280f5668..d1314cf18ad 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -280,7 +280,6 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
280 const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); 280 const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
281 struct rtnl_link_stats64 temp; 281 struct rtnl_link_stats64 temp;
282 const struct rtnl_link_stats64 *stats; 282 const struct rtnl_link_stats64 *stats;
283 static const char fmt[] = "%30s %12lu\n";
284 static const char fmt64[] = "%30s %12llu\n"; 283 static const char fmt64[] = "%30s %12llu\n";
285 int i; 284 int i;
286 285
@@ -299,10 +298,6 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
299 seq_puts(seq, "\n"); 298 seq_puts(seq, "\n");
300 seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets); 299 seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
301 seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes); 300 seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
302 seq_printf(seq, fmt, "total headroom inc",
303 dev_info->cnt_inc_headroom_on_tx);
304 seq_printf(seq, fmt, "total encap on xmit",
305 dev_info->cnt_encap_on_xmit);
306 seq_printf(seq, "Device: %s", dev_info->real_dev->name); 301 seq_printf(seq, "Device: %s", dev_info->real_dev->name);
307 /* now show all PRIORITY mappings relating to this VLAN */ 302 /* now show all PRIORITY mappings relating to this VLAN */
308 seq_printf(seq, "\nINGRESS priority mappings: " 303 seq_printf(seq, "\nINGRESS priority mappings: "
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 45c15f49140..798beac7f10 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -27,31 +27,16 @@
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/kernel.h>
30#include <linux/uaccess.h> 31#include <linux/uaccess.h>
31#include <linux/slab.h> 32#include <linux/slab.h>
32#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/stddef.h>
33#include <linux/types.h> 35#include <linux/types.h>
34#include <net/9p/9p.h> 36#include <net/9p/9p.h>
35#include <net/9p/client.h> 37#include <net/9p/client.h>
36#include "protocol.h" 38#include "protocol.h"
37 39
38#ifndef MIN
39#define MIN(a, b) (((a) < (b)) ? (a) : (b))
40#endif
41
42#ifndef MAX
43#define MAX(a, b) (((a) > (b)) ? (a) : (b))
44#endif
45
46#ifndef offset_of
47#define offset_of(type, memb) \
48 ((unsigned long)(&((type *)0)->memb))
49#endif
50#ifndef container_of
51#define container_of(obj, type, memb) \
52 ((type *)(((char *)obj) - offset_of(type, memb)))
53#endif
54
55static int 40static int
56p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...); 41p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
57 42
@@ -104,7 +89,7 @@ EXPORT_SYMBOL(p9stat_free);
104 89
105static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size) 90static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
106{ 91{
107 size_t len = MIN(pdu->size - pdu->offset, size); 92 size_t len = min(pdu->size - pdu->offset, size);
108 memcpy(data, &pdu->sdata[pdu->offset], len); 93 memcpy(data, &pdu->sdata[pdu->offset], len);
109 pdu->offset += len; 94 pdu->offset += len;
110 return size - len; 95 return size - len;
@@ -112,7 +97,7 @@ static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
112 97
113static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size) 98static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
114{ 99{
115 size_t len = MIN(pdu->capacity - pdu->size, size); 100 size_t len = min(pdu->capacity - pdu->size, size);
116 memcpy(&pdu->sdata[pdu->size], data, len); 101 memcpy(&pdu->sdata[pdu->size], data, len);
117 pdu->size += len; 102 pdu->size += len;
118 return size - len; 103 return size - len;
@@ -121,7 +106,7 @@ static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
121static size_t 106static size_t
122pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size) 107pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
123{ 108{
124 size_t len = MIN(pdu->capacity - pdu->size, size); 109 size_t len = min(pdu->capacity - pdu->size, size);
125 if (copy_from_user(&pdu->sdata[pdu->size], udata, len)) 110 if (copy_from_user(&pdu->sdata[pdu->size], udata, len))
126 len = 0; 111 len = 0;
127 112
@@ -201,7 +186,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
201 if (errcode) 186 if (errcode)
202 break; 187 break;
203 188
204 size = MAX(len, 0); 189 size = max_t(int16_t, len, 0);
205 190
206 *sptr = kmalloc(size + 1, GFP_KERNEL); 191 *sptr = kmalloc(size + 1, GFP_KERNEL);
207 if (*sptr == NULL) { 192 if (*sptr == NULL) {
@@ -256,8 +241,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
256 p9pdu_readf(pdu, proto_version, "d", count); 241 p9pdu_readf(pdu, proto_version, "d", count);
257 if (!errcode) { 242 if (!errcode) {
258 *count = 243 *count =
259 MIN(*count, 244 min_t(int32_t, *count,
260 pdu->size - pdu->offset); 245 pdu->size - pdu->offset);
261 *data = &pdu->sdata[pdu->offset]; 246 *data = &pdu->sdata[pdu->offset];
262 } 247 }
263 } 248 }
@@ -421,7 +406,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
421 const char *sptr = va_arg(ap, const char *); 406 const char *sptr = va_arg(ap, const char *);
422 int16_t len = 0; 407 int16_t len = 0;
423 if (sptr) 408 if (sptr)
424 len = MIN(strlen(sptr), USHRT_MAX); 409 len = min_t(int16_t, strlen(sptr), USHRT_MAX);
425 410
426 errcode = p9pdu_writef(pdu, proto_version, 411 errcode = p9pdu_writef(pdu, proto_version,
427 "w", len); 412 "w", len);
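Beyond removing duplicate definitions, replacing the local MIN()/MAX() macros with the kernel's min()/min_t()/max_t() avoids the usual naive-macro hazards: the kernel helpers evaluate each argument exactly once and force a single, explicit comparison type. A small userspace illustration of the double-evaluation problem (not 9p code; next_len() is a made-up stand-in for an argument with side effects):

#include <stdio.h>

#define MIN(a, b) (((a) < (b)) ? (a) : (b))

static int calls;

static int next_len(void)
{
	calls++;
	return 40 + calls;	/* pretend this reads from a descriptor */
}

int main(void)
{
	int n = MIN(next_len(), 64);	/* argument expanded twice */

	/* prints n=42 and calls=2: the side effect ran twice */
	printf("n=%d, next_len() called %d times\n", n, calls);
	return 0;
}

The min_t(int32_t, ...) and max_t(int16_t, ...) calls in the hunks above additionally make the comparison type explicit where the operands' types differ.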
diff --git a/net/Kconfig b/net/Kconfig
index 55fd82e9ffd..126c2af0fc1 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -220,6 +220,11 @@ config RPS
220 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS 220 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
221 default y 221 default y
222 222
223config XPS
224 boolean
225 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
226 default y
227
223menu "Network testing" 228menu "Network testing"
224 229
225config NET_PKTGEN 230config NET_PKTGEN
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index ad2b232a205..fce2eae8d47 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -97,7 +97,7 @@ static LIST_HEAD(br2684_devs);
97 97
98static inline struct br2684_dev *BRPRIV(const struct net_device *net_dev) 98static inline struct br2684_dev *BRPRIV(const struct net_device *net_dev)
99{ 99{
100 return (struct br2684_dev *)netdev_priv(net_dev); 100 return netdev_priv(net_dev);
101} 101}
102 102
103static inline struct net_device *list_entry_brdev(const struct list_head *le) 103static inline struct net_device *list_entry_brdev(const struct list_head *le)
diff --git a/net/atm/clip.c b/net/atm/clip.c
index ff956d1115b..d257da50fcf 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -502,7 +502,8 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
502 struct atmarp_entry *entry; 502 struct atmarp_entry *entry;
503 int error; 503 int error;
504 struct clip_vcc *clip_vcc; 504 struct clip_vcc *clip_vcc;
505 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip, .tos = 1}} }; 505 struct flowi fl = { .fl4_dst = ip,
506 .fl4_tos = 1 };
506 struct rtable *rt; 507 struct rtable *rt;
507 508
508 if (vcc->push != clip_push) { 509 if (vcc->push != clip_push) {
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 181d70c73d7..179e04bc99d 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -816,8 +816,7 @@ static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
816 if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg]) 816 if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
817 return -EINVAL; 817 return -EINVAL;
818 vcc->proto_data = dev_lec[arg]; 818 vcc->proto_data = dev_lec[arg];
819 return lec_mcast_make((struct lec_priv *)netdev_priv(dev_lec[arg]), 819 return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
820 vcc);
821} 820}
822 821
823/* Initialize device. */ 822/* Initialize device. */
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index d1e433f7d67..7ca1f46a471 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_BT_BNEP) += bnep/
10obj-$(CONFIG_BT_CMTP) += cmtp/ 10obj-$(CONFIG_BT_CMTP) += cmtp/
11obj-$(CONFIG_BT_HIDP) += hidp/ 11obj-$(CONFIG_BT_HIDP) += hidp/
12 12
13bluetooth-objs := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o hci_sysfs.o lib.o 13bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o hci_sysfs.o lib.o
diff --git a/net/bridge/br.c b/net/bridge/br.c
index c8436fa3134..84bbb82599b 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -22,8 +22,6 @@
22 22
23#include "br_private.h" 23#include "br_private.h"
24 24
25int (*br_should_route_hook)(struct sk_buff *skb);
26
27static const struct stp_proto br_stp_proto = { 25static const struct stp_proto br_stp_proto = {
28 .rcv = br_stp_rcv, 26 .rcv = br_stp_rcv,
29}; 27};
@@ -102,8 +100,6 @@ static void __exit br_deinit(void)
102 br_fdb_fini(); 100 br_fdb_fini();
103} 101}
104 102
105EXPORT_SYMBOL(br_should_route_hook);
106
107module_init(br_init) 103module_init(br_init)
108module_exit(br_deinit) 104module_exit(br_deinit)
109MODULE_LICENSE("GPL"); 105MODULE_LICENSE("GPL");
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 90512ccfd3e..2872393b293 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -238,15 +238,18 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
238int br_fdb_test_addr(struct net_device *dev, unsigned char *addr) 238int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
239{ 239{
240 struct net_bridge_fdb_entry *fdb; 240 struct net_bridge_fdb_entry *fdb;
241 struct net_bridge_port *port;
241 int ret; 242 int ret;
242 243
243 if (!br_port_exists(dev))
244 return 0;
245
246 rcu_read_lock(); 244 rcu_read_lock();
247 fdb = __br_fdb_get(br_port_get_rcu(dev)->br, addr); 245 port = br_port_get_rcu(dev);
248 ret = fdb && fdb->dst->dev != dev && 246 if (!port)
249 fdb->dst->state == BR_STATE_FORWARDING; 247 ret = 0;
248 else {
249 fdb = __br_fdb_get(port->br, addr);
250 ret = fdb && fdb->dst->dev != dev &&
251 fdb->dst->state == BR_STATE_FORWARDING;
252 }
250 rcu_read_unlock(); 253 rcu_read_unlock();
251 254
252 return ret; 255 return ret;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index cbfe87f0f34..2bd11ec6d16 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -223,7 +223,7 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
223 struct net_bridge_port_group *p; 223 struct net_bridge_port_group *p;
224 struct hlist_node *rp; 224 struct hlist_node *rp;
225 225
226 rp = rcu_dereference(br->router_list.first); 226 rp = rcu_dereference(hlist_first_rcu(&br->router_list));
227 p = mdst ? rcu_dereference(mdst->ports) : NULL; 227 p = mdst ? rcu_dereference(mdst->ports) : NULL;
228 while (p || rp) { 228 while (p || rp) {
229 struct net_bridge_port *port, *lport, *rport; 229 struct net_bridge_port *port, *lport, *rport;
@@ -242,7 +242,7 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
242 if ((unsigned long)lport >= (unsigned long)port) 242 if ((unsigned long)lport >= (unsigned long)port)
243 p = rcu_dereference(p->next); 243 p = rcu_dereference(p->next);
244 if ((unsigned long)rport >= (unsigned long)port) 244 if ((unsigned long)rport >= (unsigned long)port)
245 rp = rcu_dereference(rp->next); 245 rp = rcu_dereference(hlist_next_rcu(rp));
246 } 246 }
247 247
248 if (!prev) 248 if (!prev)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 89ad25a7620..d9d1e2bac1d 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -475,11 +475,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
475{ 475{
476 struct net_bridge_port *p; 476 struct net_bridge_port *p;
477 477
478 if (!br_port_exists(dev)) 478 p = br_port_get_rtnl(dev);
479 return -EINVAL; 479 if (!p || p->br != br)
480
481 p = br_port_get(dev);
482 if (p->br != br)
483 return -EINVAL; 480 return -EINVAL;
484 481
485 del_nbp(p); 482 del_nbp(p);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 25207a1f182..6f6d8e1b776 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -21,6 +21,10 @@
21/* Bridge group multicast address 802.1d (pg 51). */ 21/* Bridge group multicast address 802.1d (pg 51). */
22const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; 22const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
23 23
24/* Hook for brouter */
25br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
26EXPORT_SYMBOL(br_should_route_hook);
27
24static int br_pass_frame_up(struct sk_buff *skb) 28static int br_pass_frame_up(struct sk_buff *skb)
25{ 29{
26 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; 30 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
@@ -139,7 +143,7 @@ struct sk_buff *br_handle_frame(struct sk_buff *skb)
139{ 143{
140 struct net_bridge_port *p; 144 struct net_bridge_port *p;
141 const unsigned char *dest = eth_hdr(skb)->h_dest; 145 const unsigned char *dest = eth_hdr(skb)->h_dest;
142 int (*rhook)(struct sk_buff *skb); 146 br_should_route_hook_t *rhook;
143 147
144 if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) 148 if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
145 return skb; 149 return skb;
@@ -173,8 +177,8 @@ forward:
173 switch (p->state) { 177 switch (p->state) {
174 case BR_STATE_FORWARDING: 178 case BR_STATE_FORWARDING:
175 rhook = rcu_dereference(br_should_route_hook); 179 rhook = rcu_dereference(br_should_route_hook);
176 if (rhook != NULL) { 180 if (rhook) {
177 if (rhook(skb)) 181 if ((*rhook)(skb))
178 return skb; 182 return skb;
179 dest = eth_hdr(skb)->h_dest; 183 dest = eth_hdr(skb)->h_dest;
180 } 184 }
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index eb5b256ffc8..326e599f83f 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -33,6 +33,9 @@
33 33
34#include "br_private.h" 34#include "br_private.h"
35 35
36#define mlock_dereference(X, br) \
37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
38
36#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 39#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
37static inline int ipv6_is_local_multicast(const struct in6_addr *addr) 40static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
38{ 41{
@@ -135,7 +138,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
135struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 138struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
136 struct sk_buff *skb) 139 struct sk_buff *skb)
137{ 140{
138 struct net_bridge_mdb_htable *mdb = br->mdb; 141 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
139 struct br_ip ip; 142 struct br_ip ip;
140 143
141 if (br->multicast_disabled) 144 if (br->multicast_disabled)
@@ -235,7 +238,8 @@ static void br_multicast_group_expired(unsigned long data)
235 if (mp->ports) 238 if (mp->ports)
236 goto out; 239 goto out;
237 240
238 mdb = br->mdb; 241 mdb = mlock_dereference(br->mdb, br);
242
239 hlist_del_rcu(&mp->hlist[mdb->ver]); 243 hlist_del_rcu(&mp->hlist[mdb->ver]);
240 mdb->size--; 244 mdb->size--;
241 245
@@ -249,16 +253,20 @@ out:
249static void br_multicast_del_pg(struct net_bridge *br, 253static void br_multicast_del_pg(struct net_bridge *br,
250 struct net_bridge_port_group *pg) 254 struct net_bridge_port_group *pg)
251{ 255{
252 struct net_bridge_mdb_htable *mdb = br->mdb; 256 struct net_bridge_mdb_htable *mdb;
253 struct net_bridge_mdb_entry *mp; 257 struct net_bridge_mdb_entry *mp;
254 struct net_bridge_port_group *p; 258 struct net_bridge_port_group *p;
255 struct net_bridge_port_group **pp; 259 struct net_bridge_port_group __rcu **pp;
260
261 mdb = mlock_dereference(br->mdb, br);
256 262
257 mp = br_mdb_ip_get(mdb, &pg->addr); 263 mp = br_mdb_ip_get(mdb, &pg->addr);
258 if (WARN_ON(!mp)) 264 if (WARN_ON(!mp))
259 return; 265 return;
260 266
261 for (pp = &mp->ports; (p = *pp); pp = &p->next) { 267 for (pp = &mp->ports;
268 (p = mlock_dereference(*pp, br)) != NULL;
269 pp = &p->next) {
262 if (p != pg) 270 if (p != pg)
263 continue; 271 continue;
264 272
@@ -294,10 +302,10 @@ out:
294 spin_unlock(&br->multicast_lock); 302 spin_unlock(&br->multicast_lock);
295} 303}
296 304
297static int br_mdb_rehash(struct net_bridge_mdb_htable **mdbp, int max, 305static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
298 int elasticity) 306 int elasticity)
299{ 307{
300 struct net_bridge_mdb_htable *old = *mdbp; 308 struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
301 struct net_bridge_mdb_htable *mdb; 309 struct net_bridge_mdb_htable *mdb;
302 int err; 310 int err;
303 311
@@ -569,7 +577,7 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
569 struct net_bridge *br, struct net_bridge_port *port, 577 struct net_bridge *br, struct net_bridge_port *port,
570 struct br_ip *group, int hash) 578 struct br_ip *group, int hash)
571{ 579{
572 struct net_bridge_mdb_htable *mdb = br->mdb; 580 struct net_bridge_mdb_htable *mdb;
573 struct net_bridge_mdb_entry *mp; 581 struct net_bridge_mdb_entry *mp;
574 struct hlist_node *p; 582 struct hlist_node *p;
575 unsigned count = 0; 583 unsigned count = 0;
@@ -577,6 +585,7 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
577 int elasticity; 585 int elasticity;
578 int err; 586 int err;
579 587
588 mdb = rcu_dereference_protected(br->mdb, 1);
580 hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { 589 hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
581 count++; 590 count++;
582 if (unlikely(br_ip_equal(group, &mp->addr))) 591 if (unlikely(br_ip_equal(group, &mp->addr)))
@@ -642,10 +651,11 @@ static struct net_bridge_mdb_entry *br_multicast_new_group(
642 struct net_bridge *br, struct net_bridge_port *port, 651 struct net_bridge *br, struct net_bridge_port *port,
643 struct br_ip *group) 652 struct br_ip *group)
644{ 653{
645 struct net_bridge_mdb_htable *mdb = br->mdb; 654 struct net_bridge_mdb_htable *mdb;
646 struct net_bridge_mdb_entry *mp; 655 struct net_bridge_mdb_entry *mp;
647 int hash; 656 int hash;
648 657
658 mdb = rcu_dereference_protected(br->mdb, 1);
649 if (!mdb) { 659 if (!mdb) {
650 if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0)) 660 if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0))
651 return NULL; 661 return NULL;
@@ -660,7 +670,7 @@ static struct net_bridge_mdb_entry *br_multicast_new_group(
660 670
661 case -EAGAIN: 671 case -EAGAIN:
662rehash: 672rehash:
663 mdb = br->mdb; 673 mdb = rcu_dereference_protected(br->mdb, 1);
664 hash = br_ip_hash(mdb, group); 674 hash = br_ip_hash(mdb, group);
665 break; 675 break;
666 676
@@ -692,7 +702,7 @@ static int br_multicast_add_group(struct net_bridge *br,
692{ 702{
693 struct net_bridge_mdb_entry *mp; 703 struct net_bridge_mdb_entry *mp;
694 struct net_bridge_port_group *p; 704 struct net_bridge_port_group *p;
695 struct net_bridge_port_group **pp; 705 struct net_bridge_port_group __rcu **pp;
696 unsigned long now = jiffies; 706 unsigned long now = jiffies;
697 int err; 707 int err;
698 708
@@ -712,7 +722,9 @@ static int br_multicast_add_group(struct net_bridge *br,
712 goto out; 722 goto out;
713 } 723 }
714 724
715 for (pp = &mp->ports; (p = *pp); pp = &p->next) { 725 for (pp = &mp->ports;
726 (p = mlock_dereference(*pp, br)) != NULL;
727 pp = &p->next) {
716 if (p->port == port) 728 if (p->port == port)
717 goto found; 729 goto found;
718 if ((unsigned long)p->port < (unsigned long)port) 730 if ((unsigned long)p->port < (unsigned long)port)
@@ -1106,7 +1118,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1106 struct net_bridge_mdb_entry *mp; 1118 struct net_bridge_mdb_entry *mp;
1107 struct igmpv3_query *ih3; 1119 struct igmpv3_query *ih3;
1108 struct net_bridge_port_group *p; 1120 struct net_bridge_port_group *p;
1109 struct net_bridge_port_group **pp; 1121 struct net_bridge_port_group __rcu **pp;
1110 unsigned long max_delay; 1122 unsigned long max_delay;
1111 unsigned long now = jiffies; 1123 unsigned long now = jiffies;
1112 __be32 group; 1124 __be32 group;
@@ -1145,7 +1157,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1145 if (!group) 1157 if (!group)
1146 goto out; 1158 goto out;
1147 1159
1148 mp = br_mdb_ip4_get(br->mdb, group); 1160 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group);
1149 if (!mp) 1161 if (!mp)
1150 goto out; 1162 goto out;
1151 1163
@@ -1157,7 +1169,9 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1157 try_to_del_timer_sync(&mp->timer) >= 0)) 1169 try_to_del_timer_sync(&mp->timer) >= 0))
1158 mod_timer(&mp->timer, now + max_delay); 1170 mod_timer(&mp->timer, now + max_delay);
1159 1171
1160 for (pp = &mp->ports; (p = *pp); pp = &p->next) { 1172 for (pp = &mp->ports;
1173 (p = mlock_dereference(*pp, br)) != NULL;
1174 pp = &p->next) {
1161 if (timer_pending(&p->timer) ? 1175 if (timer_pending(&p->timer) ?
1162 time_after(p->timer.expires, now + max_delay) : 1176 time_after(p->timer.expires, now + max_delay) :
1163 try_to_del_timer_sync(&p->timer) >= 0) 1177 try_to_del_timer_sync(&p->timer) >= 0)
@@ -1178,7 +1192,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1178 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); 1192 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
1179 struct net_bridge_mdb_entry *mp; 1193 struct net_bridge_mdb_entry *mp;
1180 struct mld2_query *mld2q; 1194 struct mld2_query *mld2q;
1181 struct net_bridge_port_group *p, **pp; 1195 struct net_bridge_port_group *p;
1196 struct net_bridge_port_group __rcu **pp;
1182 unsigned long max_delay; 1197 unsigned long max_delay;
1183 unsigned long now = jiffies; 1198 unsigned long now = jiffies;
1184 struct in6_addr *group = NULL; 1199 struct in6_addr *group = NULL;
@@ -1214,7 +1229,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1214 if (!group) 1229 if (!group)
1215 goto out; 1230 goto out;
1216 1231
1217 mp = br_mdb_ip6_get(br->mdb, group); 1232 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group);
1218 if (!mp) 1233 if (!mp)
1219 goto out; 1234 goto out;
1220 1235
@@ -1225,7 +1240,9 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1225 try_to_del_timer_sync(&mp->timer) >= 0)) 1240 try_to_del_timer_sync(&mp->timer) >= 0))
1226 mod_timer(&mp->timer, now + max_delay); 1241 mod_timer(&mp->timer, now + max_delay);
1227 1242
1228 for (pp = &mp->ports; (p = *pp); pp = &p->next) { 1243 for (pp = &mp->ports;
1244 (p = mlock_dereference(*pp, br)) != NULL;
1245 pp = &p->next) {
1229 if (timer_pending(&p->timer) ? 1246 if (timer_pending(&p->timer) ?
1230 time_after(p->timer.expires, now + max_delay) : 1247 time_after(p->timer.expires, now + max_delay) :
1231 try_to_del_timer_sync(&p->timer) >= 0) 1248 try_to_del_timer_sync(&p->timer) >= 0)
@@ -1254,7 +1271,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1254 timer_pending(&br->multicast_querier_timer)) 1271 timer_pending(&br->multicast_querier_timer))
1255 goto out; 1272 goto out;
1256 1273
1257 mdb = br->mdb; 1274 mdb = mlock_dereference(br->mdb, br);
1258 mp = br_mdb_ip_get(mdb, group); 1275 mp = br_mdb_ip_get(mdb, group);
1259 if (!mp) 1276 if (!mp)
1260 goto out; 1277 goto out;
@@ -1277,7 +1294,9 @@ static void br_multicast_leave_group(struct net_bridge *br,
1277 goto out; 1294 goto out;
1278 } 1295 }
1279 1296
1280 for (p = mp->ports; p; p = p->next) { 1297 for (p = mlock_dereference(mp->ports, br);
1298 p != NULL;
1299 p = mlock_dereference(p->next, br)) {
1281 if (p->port != port) 1300 if (p->port != port)
1282 continue; 1301 continue;
1283 1302
@@ -1625,7 +1644,7 @@ void br_multicast_stop(struct net_bridge *br)
1625 del_timer_sync(&br->multicast_query_timer); 1644 del_timer_sync(&br->multicast_query_timer);
1626 1645
1627 spin_lock_bh(&br->multicast_lock); 1646 spin_lock_bh(&br->multicast_lock);
1628 mdb = br->mdb; 1647 mdb = mlock_dereference(br->mdb, br);
1629 if (!mdb) 1648 if (!mdb)
1630 goto out; 1649 goto out;
1631 1650
@@ -1729,6 +1748,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1729{ 1748{
1730 struct net_bridge_port *port; 1749 struct net_bridge_port *port;
1731 int err = 0; 1750 int err = 0;
1751 struct net_bridge_mdb_htable *mdb;
1732 1752
1733 spin_lock(&br->multicast_lock); 1753 spin_lock(&br->multicast_lock);
1734 if (br->multicast_disabled == !val) 1754 if (br->multicast_disabled == !val)
@@ -1741,15 +1761,16 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1741 if (!netif_running(br->dev)) 1761 if (!netif_running(br->dev))
1742 goto unlock; 1762 goto unlock;
1743 1763
1744 if (br->mdb) { 1764 mdb = mlock_dereference(br->mdb, br);
1745 if (br->mdb->old) { 1765 if (mdb) {
1766 if (mdb->old) {
1746 err = -EEXIST; 1767 err = -EEXIST;
1747rollback: 1768rollback:
1748 br->multicast_disabled = !!val; 1769 br->multicast_disabled = !!val;
1749 goto unlock; 1770 goto unlock;
1750 } 1771 }
1751 1772
1752 err = br_mdb_rehash(&br->mdb, br->mdb->max, 1773 err = br_mdb_rehash(&br->mdb, mdb->max,
1753 br->hash_elasticity); 1774 br->hash_elasticity);
1754 if (err) 1775 if (err)
1755 goto rollback; 1776 goto rollback;
@@ -1774,6 +1795,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1774{ 1795{
1775 int err = -ENOENT; 1796 int err = -ENOENT;
1776 u32 old; 1797 u32 old;
1798 struct net_bridge_mdb_htable *mdb;
1777 1799
1778 spin_lock(&br->multicast_lock); 1800 spin_lock(&br->multicast_lock);
1779 if (!netif_running(br->dev)) 1801 if (!netif_running(br->dev))
@@ -1782,7 +1804,9 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1782 err = -EINVAL; 1804 err = -EINVAL;
1783 if (!is_power_of_2(val)) 1805 if (!is_power_of_2(val))
1784 goto unlock; 1806 goto unlock;
1785 if (br->mdb && val < br->mdb->size) 1807
1808 mdb = mlock_dereference(br->mdb, br);
1809 if (mdb && val < mdb->size)
1786 goto unlock; 1810 goto unlock;
1787 1811
1788 err = 0; 1812 err = 0;
@@ -1790,8 +1814,8 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1790 old = br->hash_max; 1814 old = br->hash_max;
1791 br->hash_max = val; 1815 br->hash_max = val;
1792 1816
1793 if (br->mdb) { 1817 if (mdb) {
1794 if (br->mdb->old) { 1818 if (mdb->old) {
1795 err = -EEXIST; 1819 err = -EEXIST;
1796rollback: 1820rollback:
1797 br->hash_max = old; 1821 br->hash_max = old;
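mlock_dereference() wraps rcu_dereference_protected(), which lets the update side read an __rcu pointer without rcu_read_lock() as long as it tells lockdep which lock serializes writers (here br->multicast_lock). A hedged, generic sketch of the same pattern, with illustrative my_* names rather than bridge code:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct my_entry {
	int			key;
	struct my_entry __rcu	*next;
};

struct my_list {
	spinlock_t		lock;	/* serializes writers */
	struct my_entry __rcu	*head;
};

/* Same shape as mlock_dereference(): tell sparse/lockdep which lock makes
 * it safe to read the __rcu pointer without rcu_read_lock(). */
#define my_deref(p, l) \
	rcu_dereference_protected(p, lockdep_is_held(&(l)->lock))

static void my_del(struct my_list *l, int key)
{
	struct my_entry *e;
	struct my_entry __rcu **pp;

	spin_lock(&l->lock);
	for (pp = &l->head;
	     (e = my_deref(*pp, l)) != NULL;
	     pp = &e->next) {
		if (e->key != key)
			continue;
		rcu_assign_pointer(*pp, my_deref(e->next, l));
		break;
	}
	spin_unlock(&l->lock);

	if (e) {
		synchronize_rcu();	/* wait for readers, then free */
		kfree(e);
	}
}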
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 865fd7634b6..6e139209391 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -131,17 +131,18 @@ void br_netfilter_rtable_init(struct net_bridge *br)
131 131
132static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) 132static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
133{ 133{
134 if (!br_port_exists(dev)) 134 struct net_bridge_port *port;
135 return NULL; 135
136 return &br_port_get_rcu(dev)->br->fake_rtable; 136 port = br_port_get_rcu(dev);
137 return port ? &port->br->fake_rtable : NULL;
137} 138}
138 139
139static inline struct net_device *bridge_parent(const struct net_device *dev) 140static inline struct net_device *bridge_parent(const struct net_device *dev)
140{ 141{
141 if (!br_port_exists(dev)) 142 struct net_bridge_port *port;
142 return NULL;
143 143
144 return br_port_get_rcu(dev)->br->dev; 144 port = br_port_get_rcu(dev);
145 return port ? port->br->dev : NULL;
145} 146}
146 147
147static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) 148static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
@@ -412,13 +413,8 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
412 if (dnat_took_place(skb)) { 413 if (dnat_took_place(skb)) {
413 if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { 414 if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
414 struct flowi fl = { 415 struct flowi fl = {
415 .nl_u = { 416 .fl4_dst = iph->daddr,
416 .ip4_u = { 417 .fl4_tos = RT_TOS(iph->tos),
417 .daddr = iph->daddr,
418 .saddr = 0,
419 .tos = RT_TOS(iph->tos) },
420 },
421 .proto = 0,
422 }; 418 };
423 struct in_device *in_dev = __in_dev_get_rcu(dev); 419 struct in_device *in_dev = __in_dev_get_rcu(dev);
424 420
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4a6a378c84e..f8bf4c7f842 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -119,11 +119,13 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
119 119
120 idx = 0; 120 idx = 0;
121 for_each_netdev(net, dev) { 121 for_each_netdev(net, dev) {
122 struct net_bridge_port *port = br_port_get_rtnl(dev);
123
122 /* not a bridge port */ 124 /* not a bridge port */
123 if (!br_port_exists(dev) || idx < cb->args[0]) 125 if (!port || idx < cb->args[0])
124 goto skip; 126 goto skip;
125 127
126 if (br_fill_ifinfo(skb, br_port_get(dev), 128 if (br_fill_ifinfo(skb, port,
127 NETLINK_CB(cb->skb).pid, 129 NETLINK_CB(cb->skb).pid,
128 cb->nlh->nlmsg_seq, RTM_NEWLINK, 130 cb->nlh->nlmsg_seq, RTM_NEWLINK,
129 NLM_F_MULTI) < 0) 131 NLM_F_MULTI) < 0)
@@ -169,9 +171,9 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
169 if (!dev) 171 if (!dev)
170 return -ENODEV; 172 return -ENODEV;
171 173
172 if (!br_port_exists(dev)) 174 p = br_port_get_rtnl(dev);
175 if (!p)
173 return -EINVAL; 176 return -EINVAL;
174 p = br_port_get(dev);
175 177
176 /* if kernel STP is running, don't allow changes */ 178 /* if kernel STP is running, don't allow changes */
177 if (p->br->stp_enabled == BR_KERNEL_STP) 179 if (p->br->stp_enabled == BR_KERNEL_STP)
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 404d4e14c6a..7d337c9b608 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -32,15 +32,15 @@ struct notifier_block br_device_notifier = {
32static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr) 32static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
33{ 33{
34 struct net_device *dev = ptr; 34 struct net_device *dev = ptr;
35 struct net_bridge_port *p = br_port_get(dev); 35 struct net_bridge_port *p;
36 struct net_bridge *br; 36 struct net_bridge *br;
37 int err; 37 int err;
38 38
39 /* not a port of a bridge */ 39 /* not a port of a bridge */
40 if (!br_port_exists(dev)) 40 p = br_port_get_rtnl(dev);
41 if (!p)
41 return NOTIFY_DONE; 42 return NOTIFY_DONE;
42 43
43 p = br_port_get(dev);
44 br = p->br; 44 br = p->br;
45 45
46 switch (event) { 46 switch (event) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 75c90edaf7d..84aac7734bf 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -72,7 +72,7 @@ struct net_bridge_fdb_entry
72 72
73struct net_bridge_port_group { 73struct net_bridge_port_group {
74 struct net_bridge_port *port; 74 struct net_bridge_port *port;
75 struct net_bridge_port_group *next; 75 struct net_bridge_port_group __rcu *next;
76 struct hlist_node mglist; 76 struct hlist_node mglist;
77 struct rcu_head rcu; 77 struct rcu_head rcu;
78 struct timer_list timer; 78 struct timer_list timer;
@@ -86,7 +86,7 @@ struct net_bridge_mdb_entry
86 struct hlist_node hlist[2]; 86 struct hlist_node hlist[2];
87 struct hlist_node mglist; 87 struct hlist_node mglist;
88 struct net_bridge *br; 88 struct net_bridge *br;
89 struct net_bridge_port_group *ports; 89 struct net_bridge_port_group __rcu *ports;
90 struct rcu_head rcu; 90 struct rcu_head rcu;
91 struct timer_list timer; 91 struct timer_list timer;
92 struct timer_list query_timer; 92 struct timer_list query_timer;
@@ -151,11 +151,20 @@ struct net_bridge_port
151#endif 151#endif
152}; 152};
153 153
154#define br_port_get_rcu(dev) \
155 ((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data))
156#define br_port_get(dev) ((struct net_bridge_port *) dev->rx_handler_data)
157#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT) 154#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
158 155
156static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
157{
158 struct net_bridge_port *port = rcu_dereference(dev->rx_handler_data);
159 return br_port_exists(dev) ? port : NULL;
160}
161
162static inline struct net_bridge_port *br_port_get_rtnl(struct net_device *dev)
163{
164 return br_port_exists(dev) ?
165 rtnl_dereference(dev->rx_handler_data) : NULL;
166}
167
159struct br_cpu_netstats { 168struct br_cpu_netstats {
160 u64 rx_packets; 169 u64 rx_packets;
161 u64 rx_bytes; 170 u64 rx_bytes;
@@ -227,7 +236,7 @@ struct net_bridge
227 unsigned long multicast_startup_query_interval; 236 unsigned long multicast_startup_query_interval;
228 237
229 spinlock_t multicast_lock; 238 spinlock_t multicast_lock;
230 struct net_bridge_mdb_htable *mdb; 239 struct net_bridge_mdb_htable __rcu *mdb;
231 struct hlist_head router_list; 240 struct hlist_head router_list;
232 struct hlist_head mglist; 241 struct hlist_head mglist;
233 242
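The new br_port_get_rcu()/br_port_get_rtnl() inlines fold the old br_port_exists() check into the accessor and return NULL for non-ports, while documenting (and letting sparse/lockdep verify) which protection the caller holds. A hedged sketch of the intended call sites; the surrounding functions are illustrative, not taken from this patch, and assume bridge-internal context where "br_private.h" is included:

/* Fast path: runs under rcu_read_lock(), so the RCU accessor is used. */
static bool my_port_is_forwarding(const struct net_device *dev)
{
	struct net_bridge_port *p = br_port_get_rcu(dev);

	return p && p->state == BR_STATE_FORWARDING;
}

/* Control path: runs with RTNL held, so rtnl_dereference() is appropriate. */
static int my_check_port(struct net_device *dev, struct net_bridge *br)
{
	struct net_bridge_port *p = br_port_get_rtnl(dev);

	if (!p || p->br != br)
		return -EINVAL;
	return 0;
}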
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 35cf27087b5..3d9a55d3822 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -141,10 +141,6 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
141 struct net_bridge *br; 141 struct net_bridge *br;
142 const unsigned char *buf; 142 const unsigned char *buf;
143 143
144 if (!br_port_exists(dev))
145 goto err;
146 p = br_port_get_rcu(dev);
147
148 if (!pskb_may_pull(skb, 4)) 144 if (!pskb_may_pull(skb, 4))
149 goto err; 145 goto err;
150 146
@@ -153,6 +149,10 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
153 if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0) 149 if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
154 goto err; 150 goto err;
155 151
152 p = br_port_get_rcu(dev);
153 if (!p)
154 goto err;
155
156 br = p->br; 156 br = p->br;
157 spin_lock(&br->lock); 157 spin_lock(&br->lock);
158 158
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index ae3f106c390..1bcaf36ad61 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -87,7 +87,8 @@ static int __init ebtable_broute_init(void)
87 if (ret < 0) 87 if (ret < 0)
88 return ret; 88 return ret;
89 /* see br_input.c */ 89 /* see br_input.c */
90 rcu_assign_pointer(br_should_route_hook, ebt_broute); 90 rcu_assign_pointer(br_should_route_hook,
91 (br_should_route_hook_t *)ebt_broute);
91 return 0; 92 return 0;
92} 93}
93 94
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index a1dcf83f0d5..cbc9f395ab1 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -128,6 +128,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
128 const struct net_device *in, const struct net_device *out) 128 const struct net_device *in, const struct net_device *out)
129{ 129{
130 const struct ethhdr *h = eth_hdr(skb); 130 const struct ethhdr *h = eth_hdr(skb);
131 const struct net_bridge_port *p;
131 __be16 ethproto; 132 __be16 ethproto;
132 int verdict, i; 133 int verdict, i;
133 134
@@ -148,13 +149,11 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
148 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT)) 149 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
149 return 1; 150 return 1;
150 /* rcu_read_lock()ed by nf_hook_slow */ 151 /* rcu_read_lock()ed by nf_hook_slow */
151 if (in && br_port_exists(in) && 152 if (in && (p = br_port_get_rcu(in)) != NULL &&
152 FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev), 153 FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
153 EBT_ILOGICALIN))
154 return 1; 154 return 1;
155 if (out && br_port_exists(out) && 155 if (out && (p = br_port_get_rcu(out)) != NULL &&
156 FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev), 156 FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
157 EBT_ILOGICALOUT))
158 return 1; 157 return 1;
159 158
160 if (e->bitmask & EBT_SOURCEMAC) { 159 if (e->bitmask & EBT_SOURCEMAC) {
diff --git a/net/caif/Makefile b/net/caif/Makefile
index f87481fb0e6..9d38e406e4a 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -1,8 +1,6 @@
1ifeq ($(CONFIG_CAIF_DEBUG),y) 1ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
2EXTRA_CFLAGS += -DDEBUG
3endif
4 2
5caif-objs := caif_dev.o \ 3caif-y := caif_dev.o \
6 cfcnfg.o cfmuxl.o cfctrl.o \ 4 cfcnfg.o cfmuxl.o cfctrl.o \
7 cffrml.o cfveil.o cfdbgl.o\ 5 cffrml.o cfveil.o cfdbgl.o\
8 cfserl.o cfdgml.o \ 6 cfserl.o cfdgml.o \
@@ -13,4 +11,4 @@ obj-$(CONFIG_CAIF) += caif.o
13obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o 11obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
14obj-$(CONFIG_CAIF) += caif_socket.o 12obj-$(CONFIG_CAIF) += caif_socket.o
15 13
16export-objs := caif.o 14export-y := caif.o
diff --git a/net/can/Makefile b/net/can/Makefile
index 9cd3c4b3abd..2d3894b3274 100644
--- a/net/can/Makefile
+++ b/net/can/Makefile
@@ -3,10 +3,10 @@
3# 3#
4 4
5obj-$(CONFIG_CAN) += can.o 5obj-$(CONFIG_CAN) += can.o
6can-objs := af_can.o proc.o 6can-y := af_can.o proc.o
7 7
8obj-$(CONFIG_CAN_RAW) += can-raw.o 8obj-$(CONFIG_CAN_RAW) += can-raw.o
9can-raw-objs := raw.o 9can-raw-y := raw.o
10 10
11obj-$(CONFIG_CAN_BCM) += can-bcm.o 11obj-$(CONFIG_CAN_BCM) += can-bcm.o
12can-bcm-objs := bcm.o 12can-bcm-y := bcm.o
diff --git a/net/ceph/Makefile b/net/ceph/Makefile
index 5f19415ec9c..e87ef435e11 100644
--- a/net/ceph/Makefile
+++ b/net/ceph/Makefile
@@ -3,7 +3,7 @@
3# 3#
4obj-$(CONFIG_CEPH_LIB) += libceph.o 4obj-$(CONFIG_CEPH_LIB) += libceph.o
5 5
6libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \ 6libceph-y := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
7 mon_client.o \ 7 mon_client.o \
8 osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \ 8 osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
9 debugfs.o \ 9 debugfs.o \
diff --git a/net/core/datagram.c b/net/core/datagram.c
index cd1e039c875..18ac112ea7a 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -177,7 +177,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
177 * interrupt level will suddenly eat the receive_queue. 177 * interrupt level will suddenly eat the receive_queue.
178 * 178 *
179 * Look at current nfs client by the way... 179 * Look at current nfs client by the way...
180 * However, this function was corrent in any case. 8) 180 * However, this function was correct in any case. 8)
181 */ 181 */
182 unsigned long cpu_flags; 182 unsigned long cpu_flags;
183 183
diff --git a/net/core/dev.c b/net/core/dev.c
index 0dd54a69dac..d28b3a023bb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -743,34 +743,31 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
743EXPORT_SYMBOL(dev_get_by_index); 743EXPORT_SYMBOL(dev_get_by_index);
744 744
745/** 745/**
746 * dev_getbyhwaddr - find a device by its hardware address 746 * dev_getbyhwaddr_rcu - find a device by its hardware address
747 * @net: the applicable net namespace 747 * @net: the applicable net namespace
748 * @type: media type of device 748 * @type: media type of device
749 * @ha: hardware address 749 * @ha: hardware address
750 * 750 *
751 * Search for an interface by MAC address. Returns NULL if the device 751 * Search for an interface by MAC address. Returns NULL if the device
752 * is not found or a pointer to the device. The caller must hold the 752 * is not found or a pointer to the device. The caller must hold RCU
753 * rtnl semaphore. The returned device has not had its ref count increased 753 * The returned device has not had its ref count increased
754 * and the caller must therefore be careful about locking 754 * and the caller must therefore be careful about locking
755 * 755 *
756 * BUGS:
757 * If the API was consistent this would be __dev_get_by_hwaddr
758 */ 756 */
759 757
760struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha) 758struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
759 const char *ha)
761{ 760{
762 struct net_device *dev; 761 struct net_device *dev;
763 762
764 ASSERT_RTNL(); 763 for_each_netdev_rcu(net, dev)
765
766 for_each_netdev(net, dev)
767 if (dev->type == type && 764 if (dev->type == type &&
768 !memcmp(dev->dev_addr, ha, dev->addr_len)) 765 !memcmp(dev->dev_addr, ha, dev->addr_len))
769 return dev; 766 return dev;
770 767
771 return NULL; 768 return NULL;
772} 769}
773EXPORT_SYMBOL(dev_getbyhwaddr); 770EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
774 771
775struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) 772struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
776{ 773{
@@ -1557,12 +1554,19 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1557 */ 1554 */
1558int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 1555int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1559{ 1556{
1557 int rc;
1558
1560 if (txq < 1 || txq > dev->num_tx_queues) 1559 if (txq < 1 || txq > dev->num_tx_queues)
1561 return -EINVAL; 1560 return -EINVAL;
1562 1561
1563 if (dev->reg_state == NETREG_REGISTERED) { 1562 if (dev->reg_state == NETREG_REGISTERED) {
1564 ASSERT_RTNL(); 1563 ASSERT_RTNL();
1565 1564
1565 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1566 txq);
1567 if (rc)
1568 return rc;
1569
1566 if (txq < dev->real_num_tx_queues) 1570 if (txq < dev->real_num_tx_queues)
1567 qdisc_reset_all_tx_gt(dev, txq); 1571 qdisc_reset_all_tx_gt(dev, txq);
1568 } 1572 }
@@ -1794,16 +1798,18 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1794 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 1798 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1795 struct packet_type *ptype; 1799 struct packet_type *ptype;
1796 __be16 type = skb->protocol; 1800 __be16 type = skb->protocol;
1801 int vlan_depth = ETH_HLEN;
1797 int err; 1802 int err;
1798 1803
1799 if (type == htons(ETH_P_8021Q)) { 1804 while (type == htons(ETH_P_8021Q)) {
1800 struct vlan_ethhdr *veh; 1805 struct vlan_hdr *vh;
1801 1806
1802 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) 1807 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
1803 return ERR_PTR(-EINVAL); 1808 return ERR_PTR(-EINVAL);
1804 1809
1805 veh = (struct vlan_ethhdr *)skb->data; 1810 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
1806 type = veh->h_vlan_encapsulated_proto; 1811 type = vh->h_vlan_encapsulated_proto;
1812 vlan_depth += VLAN_HLEN;
1807 } 1813 }
1808 1814
1809 skb_reset_mac_header(skb); 1815 skb_reset_mac_header(skb);
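
The skb_gso_segment() hunk above generalizes the single 802.1Q case into a loop: each pass reads the vlan_hdr at the current offset, takes its encapsulated protocol and advances vlan_depth by VLAN_HLEN, so stacked tags are unwrapped until a non-VLAN ethertype is found. The same walk over a raw frame buffer, assuming a hand-rolled header layout rather than the kernel structs:

#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN   14
#define VLAN_HLEN   4
#define ETH_P_8021Q 0x8100

/* Ethertype lives in the last 2 bytes of the Ethernet header,
 * then each 802.1Q tag is 2 bytes TCI + 2 bytes inner ethertype. */
static uint16_t rd16(const uint8_t *p) { return (uint16_t)(p[0] << 8 | p[1]); }

static uint16_t inner_proto(const uint8_t *frame, size_t len, size_t *depth)
{
    uint16_t type = rd16(frame + ETH_HLEN - 2);
    size_t   off  = ETH_HLEN;

    while (type == ETH_P_8021Q && off + VLAN_HLEN <= len) {
        type = rd16(frame + off + 2);   /* encapsulated protocol */
        off += VLAN_HLEN;
    }
    *depth = off;
    return type;
}

int main(void)
{
    /* dst + src MACs (12 bytes), then 802.1Q, 802.1Q, IPv4 (0x0800) */
    uint8_t frame[ETH_HLEN + 2 * VLAN_HLEN] = { 0 };
    frame[12] = 0x81; frame[13] = 0x00;              /* outer tag   */
    frame[16] = 0x81; frame[17] = 0x00;              /* second tag  */
    frame[20] = 0x08; frame[21] = 0x00;              /* IPv4 inside */

    size_t depth;
    uint16_t proto = inner_proto(frame, sizeof(frame), &depth);
    printf("proto 0x%04x at offset %zu\n", proto, depth);  /* 0x0800 at 22 */
    return 0;
}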
@@ -1817,8 +1823,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1817 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) 1823 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1818 dev->ethtool_ops->get_drvinfo(dev, &info); 1824 dev->ethtool_ops->get_drvinfo(dev, &info);
1819 1825
1820 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d " 1826 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
1821 "ip_summed=%d",
1822 info.driver, dev ? dev->features : 0L, 1827 info.driver, dev ? dev->features : 0L,
1823 skb->sk ? skb->sk->sk_route_caps : 0L, 1828 skb->sk ? skb->sk->sk_route_caps : 0L,
1824 skb->len, skb->data_len, skb->ip_summed); 1829 skb->len, skb->data_len, skb->ip_summed);
@@ -1967,6 +1972,23 @@ static inline void skb_orphan_try(struct sk_buff *skb)
1967 } 1972 }
1968} 1973}
1969 1974
1975int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
1976{
1977 __be16 protocol = skb->protocol;
1978
1979 if (protocol == htons(ETH_P_8021Q)) {
1980 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1981 protocol = veh->h_vlan_encapsulated_proto;
1982 } else if (!skb->vlan_tci)
1983 return dev->features;
1984
1985 if (protocol != htons(ETH_P_8021Q))
1986 return dev->features & dev->vlan_features;
1987 else
1988 return 0;
1989}
1990EXPORT_SYMBOL(netif_get_vlan_features);
1991
1970/* 1992/*
1971 * Returns true if either: 1993 * Returns true if either:
1972 * 1. skb has frag_list and the device doesn't support FRAGLIST, or 1994 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
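
netif_get_vlan_features() condenses the feature choice for a frame: untagged traffic keeps the full dev->features, a tagged frame whose encapsulated protocol is not another 802.1Q header is limited to features & vlan_features, and anything still 802.1Q after unwrapping one in-band tag gets no offloads at all. A condensed restatement of that decision with the skb fields flattened into parameters (a sketch, not the exported function):

#include <stdio.h>

#define ETH_P_8021Q 0x8100

/* features: everything the device can do
 * vlan_features: the subset still usable when a VLAN tag is present */
static unsigned long vlan_feature_mask(unsigned int protocol,
                                       unsigned int inner_protocol,
                                       int hw_accel_tag,
                                       unsigned long features,
                                       unsigned long vlan_features)
{
    if (protocol == ETH_P_8021Q)
        protocol = inner_protocol;          /* one in-band tag: look inside */
    else if (!hw_accel_tag)
        return features;                    /* untagged frame */

    if (protocol != ETH_P_8021Q)
        return features & vlan_features;
    return 0;                               /* still tagged: offload nothing */
}

int main(void)
{
    unsigned long feat = 0xff, vlan_feat = 0x0f;

    printf("%#lx\n", vlan_feature_mask(0x0800, 0, 0, feat, vlan_feat));      /* 0xff */
    printf("%#lx\n", vlan_feature_mask(0x8100, 0x0800, 0, feat, vlan_feat)); /* 0xf  */
    printf("%#lx\n", vlan_feature_mask(0x8100, 0x8100, 0, feat, vlan_feat)); /* 0    */
    return 0;
}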
@@ -1977,15 +1999,20 @@ static inline void skb_orphan_try(struct sk_buff *skb)
1977static inline int skb_needs_linearize(struct sk_buff *skb, 1999static inline int skb_needs_linearize(struct sk_buff *skb,
1978 struct net_device *dev) 2000 struct net_device *dev)
1979{ 2001{
1980 int features = dev->features; 2002 if (skb_is_nonlinear(skb)) {
2003 int features = dev->features;
1981 2004
1982 if (skb->protocol == htons(ETH_P_8021Q) || vlan_tx_tag_present(skb)) 2005 if (vlan_tx_tag_present(skb))
1983 features &= dev->vlan_features; 2006 features &= dev->vlan_features;
2007
2008 return (skb_has_frag_list(skb) &&
2009 !(features & NETIF_F_FRAGLIST)) ||
2010 (skb_shinfo(skb)->nr_frags &&
2011 (!(features & NETIF_F_SG) ||
2012 illegal_highdma(dev, skb)));
2013 }
1984 2014
1985 return skb_is_nonlinear(skb) && 2015 return 0;
1986 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
1987 (skb_shinfo(skb)->nr_frags && (!(features & NETIF_F_SG) ||
1988 illegal_highdma(dev, skb))));
1989} 2016}
1990 2017
1991int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2018int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1995,9 +2022,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1995 int rc = NETDEV_TX_OK; 2022 int rc = NETDEV_TX_OK;
1996 2023
1997 if (likely(!skb->next)) { 2024 if (likely(!skb->next)) {
1998 if (!list_empty(&ptype_all))
1999 dev_queue_xmit_nit(skb, dev);
2000
2001 /* 2025 /*
2002 * If device doesnt need skb->dst, release it right now while 2026 * If device doesnt need skb->dst, release it right now while
2003 * its hot in this cpu cache 2027 * its hot in this cpu cache
@@ -2005,6 +2029,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2005 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2029 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2006 skb_dst_drop(skb); 2030 skb_dst_drop(skb);
2007 2031
2032 if (!list_empty(&ptype_all))
2033 dev_queue_xmit_nit(skb, dev);
2034
2008 skb_orphan_try(skb); 2035 skb_orphan_try(skb);
2009 2036
2010 if (vlan_tx_tag_present(skb) && 2037 if (vlan_tx_tag_present(skb) &&
@@ -2119,26 +2146,70 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2119 return queue_index; 2146 return queue_index;
2120} 2147}
2121 2148
2149static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2150{
2151#ifdef CONFIG_XPS
2152 struct xps_dev_maps *dev_maps;
2153 struct xps_map *map;
2154 int queue_index = -1;
2155
2156 rcu_read_lock();
2157 dev_maps = rcu_dereference(dev->xps_maps);
2158 if (dev_maps) {
2159 map = rcu_dereference(
2160 dev_maps->cpu_map[raw_smp_processor_id()]);
2161 if (map) {
2162 if (map->len == 1)
2163 queue_index = map->queues[0];
2164 else {
2165 u32 hash;
2166 if (skb->sk && skb->sk->sk_hash)
2167 hash = skb->sk->sk_hash;
2168 else
2169 hash = (__force u16) skb->protocol ^
2170 skb->rxhash;
2171 hash = jhash_1word(hash, hashrnd);
2172 queue_index = map->queues[
2173 ((u64)hash * map->len) >> 32];
2174 }
2175 if (unlikely(queue_index >= dev->real_num_tx_queues))
2176 queue_index = -1;
2177 }
2178 }
2179 rcu_read_unlock();
2180
2181 return queue_index;
2182#else
2183 return -1;
2184#endif
2185}
2186
2122static struct netdev_queue *dev_pick_tx(struct net_device *dev, 2187static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2123 struct sk_buff *skb) 2188 struct sk_buff *skb)
2124{ 2189{
2125 int queue_index; 2190 int queue_index;
2126 const struct net_device_ops *ops = dev->netdev_ops; 2191 const struct net_device_ops *ops = dev->netdev_ops;
2127 2192
2128 if (ops->ndo_select_queue) { 2193 if (dev->real_num_tx_queues == 1)
2194 queue_index = 0;
2195 else if (ops->ndo_select_queue) {
2129 queue_index = ops->ndo_select_queue(dev, skb); 2196 queue_index = ops->ndo_select_queue(dev, skb);
2130 queue_index = dev_cap_txqueue(dev, queue_index); 2197 queue_index = dev_cap_txqueue(dev, queue_index);
2131 } else { 2198 } else {
2132 struct sock *sk = skb->sk; 2199 struct sock *sk = skb->sk;
2133 queue_index = sk_tx_queue_get(sk); 2200 queue_index = sk_tx_queue_get(sk);
2134 if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
2135 2201
2136 queue_index = 0; 2202 if (queue_index < 0 || skb->ooo_okay ||
2137 if (dev->real_num_tx_queues > 1) 2203 queue_index >= dev->real_num_tx_queues) {
2204 int old_index = queue_index;
2205
2206 queue_index = get_xps_queue(dev, skb);
2207 if (queue_index < 0)
2138 queue_index = skb_tx_hash(dev, skb); 2208 queue_index = skb_tx_hash(dev, skb);
2139 2209
2140 if (sk) { 2210 if (queue_index != old_index && sk) {
2141 struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1); 2211 struct dst_entry *dst =
2212 rcu_dereference_check(sk->sk_dst_cache, 1);
2142 2213
2143 if (dst && skb_dst(skb) == dst) 2214 if (dst && skb_dst(skb) == dst)
2144 sk_tx_queue_set(sk, queue_index); 2215 sk_tx_queue_set(sk, queue_index);
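
get_xps_queue() picks a transmit queue for the sending CPU from an RCU-protected per-CPU map: the flow hash (socket hash, or protocol xor rxhash, mixed through jhash) is reduced to an index with ((u64)hash * map->len) >> 32, the usual multiply-shift replacement for hash % len. A small demonstration of just that reduction (function name invented):

#include <stdio.h>
#include <stdint.h>

/* Map a 32-bit hash onto [0, len) without a modulo:
 * the high 32 bits of hash * len spread evenly over the range. */
static uint32_t hash_to_queue(uint32_t hash, uint32_t len)
{
    return (uint32_t)(((uint64_t)hash * len) >> 32);
}

int main(void)
{
    uint32_t len = 4;                         /* queues mapped to this CPU */
    uint32_t hashes[] = { 0x00000000u, 0x3fffffffu, 0x80000000u, 0xffffffffu };

    for (unsigned i = 0; i < 4; i++)
        printf("hash 0x%08x -> queue %u\n",
               hashes[i], hash_to_queue(hashes[i], len));
    /* prints queues 0, 0, 2, 3 for the four sample hashes */
    return 0;
}

If the resulting index is stale, i.e. at least real_num_tx_queues, the code above falls back to skb_tx_hash(), and the chosen queue is only written back into the socket when it actually changed.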
@@ -4967,10 +5038,13 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
4967 } 5038 }
4968 5039
4969 if (features & NETIF_F_UFO) { 5040 if (features & NETIF_F_UFO) {
4970 if (!(features & NETIF_F_GEN_CSUM)) { 5041 /* maybe split UFO into V4 and V6? */
5042 if (!((features & NETIF_F_GEN_CSUM) ||
5043 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5044 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4971 if (name) 5045 if (name)
4972 printk(KERN_ERR "%s: Dropping NETIF_F_UFO " 5046 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4973 "since no NETIF_F_HW_CSUM feature.\n", 5047 "since no checksum offload features.\n",
4974 name); 5048 name);
4975 features &= ~NETIF_F_UFO; 5049 features &= ~NETIF_F_UFO;
4976 } 5050 }
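
The UFO check in netdev_fix_features() now accepts either NETIF_F_GEN_CSUM or the NETIF_F_IP_CSUM plus NETIF_F_IPV6_CSUM pair; note that the pair is compared against the full mask, since a plain non-zero test would also pass with only one of the two bits set. The same predicate with made-up single-bit flags (the real NETIF_F_GEN_CSUM is itself a mask):

#include <stdio.h>

#define F_GEN_CSUM  0x1
#define F_IP_CSUM   0x2
#define F_IPV6_CSUM 0x4

/* UFO needs a checksum offload that covers both IPv4 and IPv6. */
static int ufo_csum_ok(unsigned long features)
{
    unsigned long both = F_IP_CSUM | F_IPV6_CSUM;

    return (features & F_GEN_CSUM) || (features & both) == both;
}

int main(void)
{
    printf("%d\n", ufo_csum_ok(F_GEN_CSUM));               /* 1 */
    printf("%d\n", ufo_csum_ok(F_IP_CSUM | F_IPV6_CSUM));  /* 1 */
    printf("%d\n", ufo_csum_ok(F_IP_CSUM));                /* 0: v6 missing */
    return 0;
}

The identical test reappears in the ethtool_set_ufo() hunk further down, so user requests and feature fixups stay in agreement.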
@@ -5014,9 +5088,9 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5014} 5088}
5015EXPORT_SYMBOL(netif_stacked_transfer_operstate); 5089EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5016 5090
5091#ifdef CONFIG_RPS
5017static int netif_alloc_rx_queues(struct net_device *dev) 5092static int netif_alloc_rx_queues(struct net_device *dev)
5018{ 5093{
5019#ifdef CONFIG_RPS
5020 unsigned int i, count = dev->num_rx_queues; 5094 unsigned int i, count = dev->num_rx_queues;
5021 struct netdev_rx_queue *rx; 5095 struct netdev_rx_queue *rx;
5022 5096
@@ -5029,15 +5103,22 @@ static int netif_alloc_rx_queues(struct net_device *dev)
5029 } 5103 }
5030 dev->_rx = rx; 5104 dev->_rx = rx;
5031 5105
5032 /*
5033 * Set a pointer to first element in the array which holds the
5034 * reference count.
5035 */
5036 for (i = 0; i < count; i++) 5106 for (i = 0; i < count; i++)
5037 rx[i].first = rx; 5107 rx[i].dev = dev;
5038#endif
5039 return 0; 5108 return 0;
5040} 5109}
5110#endif
5111
5112static void netdev_init_one_queue(struct net_device *dev,
5113 struct netdev_queue *queue, void *_unused)
5114{
5115 /* Initialize queue lock */
5116 spin_lock_init(&queue->_xmit_lock);
5117 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5118 queue->xmit_lock_owner = -1;
5119 netdev_queue_numa_node_write(queue, -1);
5120 queue->dev = dev;
5121}
5041 5122
5042static int netif_alloc_netdev_queues(struct net_device *dev) 5123static int netif_alloc_netdev_queues(struct net_device *dev)
5043{ 5124{
@@ -5053,25 +5134,11 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
5053 return -ENOMEM; 5134 return -ENOMEM;
5054 } 5135 }
5055 dev->_tx = tx; 5136 dev->_tx = tx;
5056 return 0;
5057}
5058 5137
5059static void netdev_init_one_queue(struct net_device *dev,
5060 struct netdev_queue *queue,
5061 void *_unused)
5062{
5063 queue->dev = dev;
5064
5065 /* Initialize queue lock */
5066 spin_lock_init(&queue->_xmit_lock);
5067 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5068 queue->xmit_lock_owner = -1;
5069}
5070
5071static void netdev_init_queues(struct net_device *dev)
5072{
5073 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 5138 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5074 spin_lock_init(&dev->tx_global_lock); 5139 spin_lock_init(&dev->tx_global_lock);
5140
5141 return 0;
5075} 5142}
5076 5143
5077/** 5144/**
@@ -5110,16 +5177,6 @@ int register_netdevice(struct net_device *dev)
5110 5177
5111 dev->iflink = -1; 5178 dev->iflink = -1;
5112 5179
5113 ret = netif_alloc_rx_queues(dev);
5114 if (ret)
5115 goto out;
5116
5117 ret = netif_alloc_netdev_queues(dev);
5118 if (ret)
5119 goto out;
5120
5121 netdev_init_queues(dev);
5122
5123 /* Init, if this function is available */ 5180 /* Init, if this function is available */
5124 if (dev->netdev_ops->ndo_init) { 5181 if (dev->netdev_ops->ndo_init) {
5125 ret = dev->netdev_ops->ndo_init(dev); 5182 ret = dev->netdev_ops->ndo_init(dev);
@@ -5577,10 +5634,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5577 5634
5578 dev->num_tx_queues = queue_count; 5635 dev->num_tx_queues = queue_count;
5579 dev->real_num_tx_queues = queue_count; 5636 dev->real_num_tx_queues = queue_count;
5637 if (netif_alloc_netdev_queues(dev))
5638 goto free_pcpu;
5580 5639
5581#ifdef CONFIG_RPS 5640#ifdef CONFIG_RPS
5582 dev->num_rx_queues = queue_count; 5641 dev->num_rx_queues = queue_count;
5583 dev->real_num_rx_queues = queue_count; 5642 dev->real_num_rx_queues = queue_count;
5643 if (netif_alloc_rx_queues(dev))
5644 goto free_pcpu;
5584#endif 5645#endif
5585 5646
5586 dev->gso_max_size = GSO_MAX_SIZE; 5647 dev->gso_max_size = GSO_MAX_SIZE;
@@ -5597,6 +5658,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5597 5658
5598free_pcpu: 5659free_pcpu:
5599 free_percpu(dev->pcpu_refcnt); 5660 free_percpu(dev->pcpu_refcnt);
5661 kfree(dev->_tx);
5662#ifdef CONFIG_RPS
5663 kfree(dev->_rx);
5664#endif
5665
5600free_p: 5666free_p:
5601 kfree(p); 5667 kfree(p);
5602 return NULL; 5668 return NULL;
@@ -5618,6 +5684,9 @@ void free_netdev(struct net_device *dev)
5618 release_net(dev_net(dev)); 5684 release_net(dev_net(dev));
5619 5685
5620 kfree(dev->_tx); 5686 kfree(dev->_tx);
5687#ifdef CONFIG_RPS
5688 kfree(dev->_rx);
5689#endif
5621 5690
5622 kfree(rcu_dereference_raw(dev->ingress_queue)); 5691 kfree(rcu_dereference_raw(dev->ingress_queue));
5623 5692
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 956a9f4971c..d5bc2881888 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1171,7 +1171,9 @@ static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
1171 return -EFAULT; 1171 return -EFAULT;
1172 if (edata.data && !(dev->features & NETIF_F_SG)) 1172 if (edata.data && !(dev->features & NETIF_F_SG))
1173 return -EINVAL; 1173 return -EINVAL;
1174 if (edata.data && !(dev->features & NETIF_F_HW_CSUM)) 1174 if (edata.data && !((dev->features & NETIF_F_GEN_CSUM) ||
1175 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
1176 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
1175 return -EINVAL; 1177 return -EINVAL;
1176 return dev->ethtool_ops->set_ufo(dev, edata.data); 1178 return dev->ethtool_ops->set_ufo(dev, edata.data);
1177} 1179}
diff --git a/net/core/filter.c b/net/core/filter.c
index ae21a0d3c4a..e193e29d467 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -37,9 +37,58 @@
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38#include <asm/unaligned.h> 38#include <asm/unaligned.h>
39#include <linux/filter.h> 39#include <linux/filter.h>
40#include <linux/reciprocal_div.h>
41
42enum {
43 BPF_S_RET_K = 1,
44 BPF_S_RET_A,
45 BPF_S_ALU_ADD_K,
46 BPF_S_ALU_ADD_X,
47 BPF_S_ALU_SUB_K,
48 BPF_S_ALU_SUB_X,
49 BPF_S_ALU_MUL_K,
50 BPF_S_ALU_MUL_X,
51 BPF_S_ALU_DIV_X,
52 BPF_S_ALU_AND_K,
53 BPF_S_ALU_AND_X,
54 BPF_S_ALU_OR_K,
55 BPF_S_ALU_OR_X,
56 BPF_S_ALU_LSH_K,
57 BPF_S_ALU_LSH_X,
58 BPF_S_ALU_RSH_K,
59 BPF_S_ALU_RSH_X,
60 BPF_S_ALU_NEG,
61 BPF_S_LD_W_ABS,
62 BPF_S_LD_H_ABS,
63 BPF_S_LD_B_ABS,
64 BPF_S_LD_W_LEN,
65 BPF_S_LD_W_IND,
66 BPF_S_LD_H_IND,
67 BPF_S_LD_B_IND,
68 BPF_S_LD_IMM,
69 BPF_S_LDX_W_LEN,
70 BPF_S_LDX_B_MSH,
71 BPF_S_LDX_IMM,
72 BPF_S_MISC_TAX,
73 BPF_S_MISC_TXA,
74 BPF_S_ALU_DIV_K,
75 BPF_S_LD_MEM,
76 BPF_S_LDX_MEM,
77 BPF_S_ST,
78 BPF_S_STX,
79 BPF_S_JMP_JA,
80 BPF_S_JMP_JEQ_K,
81 BPF_S_JMP_JEQ_X,
82 BPF_S_JMP_JGE_K,
83 BPF_S_JMP_JGE_X,
84 BPF_S_JMP_JGT_K,
85 BPF_S_JMP_JGT_X,
86 BPF_S_JMP_JSET_K,
87 BPF_S_JMP_JSET_X,
88};
40 89
41/* No hurry in this branch */ 90/* No hurry in this branch */
42static void *__load_pointer(struct sk_buff *skb, int k) 91static void *__load_pointer(const struct sk_buff *skb, int k)
43{ 92{
44 u8 *ptr = NULL; 93 u8 *ptr = NULL;
45 94
@@ -53,7 +102,7 @@ static void *__load_pointer(struct sk_buff *skb, int k)
53 return NULL; 102 return NULL;
54} 103}
55 104
56static inline void *load_pointer(struct sk_buff *skb, int k, 105static inline void *load_pointer(const struct sk_buff *skb, int k,
57 unsigned int size, void *buffer) 106 unsigned int size, void *buffer)
58{ 107{
59 if (k >= 0) 108 if (k >= 0)
@@ -89,7 +138,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
89 rcu_read_lock_bh(); 138 rcu_read_lock_bh();
90 filter = rcu_dereference_bh(sk->sk_filter); 139 filter = rcu_dereference_bh(sk->sk_filter);
91 if (filter) { 140 if (filter) {
92 unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len); 141 unsigned int pkt_len = sk_run_filter(skb, filter->insns);
93 142
94 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; 143 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
95 } 144 }
@@ -103,50 +152,52 @@ EXPORT_SYMBOL(sk_filter);
103 * sk_run_filter - run a filter on a socket 152 * sk_run_filter - run a filter on a socket
104 * @skb: buffer to run the filter on 153 * @skb: buffer to run the filter on
105 * @filter: filter to apply 154 * @filter: filter to apply
106 * @flen: length of filter
107 * 155 *
108 * Decode and apply filter instructions to the skb->data. 156 * Decode and apply filter instructions to the skb->data.
109 * Return length to keep, 0 for none. skb is the data we are 157 * Return length to keep, 0 for none. @skb is the data we are
110 * filtering, filter is the array of filter instructions, and 158 * filtering, @filter is the array of filter instructions.
111 * len is the number of filter blocks in the array. 159 * Because all jumps are guaranteed to be before last instruction,
160 * and last instruction guaranteed to be a RET, we dont need to check
161 * flen. (We used to pass to this function the length of filter)
112 */ 162 */
113unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) 163unsigned int sk_run_filter(const struct sk_buff *skb,
164 const struct sock_filter *fentry)
114{ 165{
115 void *ptr; 166 void *ptr;
116 u32 A = 0; /* Accumulator */ 167 u32 A = 0; /* Accumulator */
117 u32 X = 0; /* Index Register */ 168 u32 X = 0; /* Index Register */
118 u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ 169 u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
119 unsigned long memvalid = 0;
120 u32 tmp; 170 u32 tmp;
121 int k; 171 int k;
122 int pc;
123 172
124 BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
125 /* 173 /*
126 * Process array of filter instructions. 174 * Process array of filter instructions.
127 */ 175 */
128 for (pc = 0; pc < flen; pc++) { 176 for (;; fentry++) {
129 const struct sock_filter *fentry = &filter[pc]; 177#if defined(CONFIG_X86_32)
130 u32 f_k = fentry->k; 178#define K (fentry->k)
179#else
180 const u32 K = fentry->k;
181#endif
131 182
132 switch (fentry->code) { 183 switch (fentry->code) {
133 case BPF_S_ALU_ADD_X: 184 case BPF_S_ALU_ADD_X:
134 A += X; 185 A += X;
135 continue; 186 continue;
136 case BPF_S_ALU_ADD_K: 187 case BPF_S_ALU_ADD_K:
137 A += f_k; 188 A += K;
138 continue; 189 continue;
139 case BPF_S_ALU_SUB_X: 190 case BPF_S_ALU_SUB_X:
140 A -= X; 191 A -= X;
141 continue; 192 continue;
142 case BPF_S_ALU_SUB_K: 193 case BPF_S_ALU_SUB_K:
143 A -= f_k; 194 A -= K;
144 continue; 195 continue;
145 case BPF_S_ALU_MUL_X: 196 case BPF_S_ALU_MUL_X:
146 A *= X; 197 A *= X;
147 continue; 198 continue;
148 case BPF_S_ALU_MUL_K: 199 case BPF_S_ALU_MUL_K:
149 A *= f_k; 200 A *= K;
150 continue; 201 continue;
151 case BPF_S_ALU_DIV_X: 202 case BPF_S_ALU_DIV_X:
152 if (X == 0) 203 if (X == 0)
@@ -154,64 +205,64 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
154 A /= X; 205 A /= X;
155 continue; 206 continue;
156 case BPF_S_ALU_DIV_K: 207 case BPF_S_ALU_DIV_K:
157 A /= f_k; 208 A = reciprocal_divide(A, K);
158 continue; 209 continue;
159 case BPF_S_ALU_AND_X: 210 case BPF_S_ALU_AND_X:
160 A &= X; 211 A &= X;
161 continue; 212 continue;
162 case BPF_S_ALU_AND_K: 213 case BPF_S_ALU_AND_K:
163 A &= f_k; 214 A &= K;
164 continue; 215 continue;
165 case BPF_S_ALU_OR_X: 216 case BPF_S_ALU_OR_X:
166 A |= X; 217 A |= X;
167 continue; 218 continue;
168 case BPF_S_ALU_OR_K: 219 case BPF_S_ALU_OR_K:
169 A |= f_k; 220 A |= K;
170 continue; 221 continue;
171 case BPF_S_ALU_LSH_X: 222 case BPF_S_ALU_LSH_X:
172 A <<= X; 223 A <<= X;
173 continue; 224 continue;
174 case BPF_S_ALU_LSH_K: 225 case BPF_S_ALU_LSH_K:
175 A <<= f_k; 226 A <<= K;
176 continue; 227 continue;
177 case BPF_S_ALU_RSH_X: 228 case BPF_S_ALU_RSH_X:
178 A >>= X; 229 A >>= X;
179 continue; 230 continue;
180 case BPF_S_ALU_RSH_K: 231 case BPF_S_ALU_RSH_K:
181 A >>= f_k; 232 A >>= K;
182 continue; 233 continue;
183 case BPF_S_ALU_NEG: 234 case BPF_S_ALU_NEG:
184 A = -A; 235 A = -A;
185 continue; 236 continue;
186 case BPF_S_JMP_JA: 237 case BPF_S_JMP_JA:
187 pc += f_k; 238 fentry += K;
188 continue; 239 continue;
189 case BPF_S_JMP_JGT_K: 240 case BPF_S_JMP_JGT_K:
190 pc += (A > f_k) ? fentry->jt : fentry->jf; 241 fentry += (A > K) ? fentry->jt : fentry->jf;
191 continue; 242 continue;
192 case BPF_S_JMP_JGE_K: 243 case BPF_S_JMP_JGE_K:
193 pc += (A >= f_k) ? fentry->jt : fentry->jf; 244 fentry += (A >= K) ? fentry->jt : fentry->jf;
194 continue; 245 continue;
195 case BPF_S_JMP_JEQ_K: 246 case BPF_S_JMP_JEQ_K:
196 pc += (A == f_k) ? fentry->jt : fentry->jf; 247 fentry += (A == K) ? fentry->jt : fentry->jf;
197 continue; 248 continue;
198 case BPF_S_JMP_JSET_K: 249 case BPF_S_JMP_JSET_K:
199 pc += (A & f_k) ? fentry->jt : fentry->jf; 250 fentry += (A & K) ? fentry->jt : fentry->jf;
200 continue; 251 continue;
201 case BPF_S_JMP_JGT_X: 252 case BPF_S_JMP_JGT_X:
202 pc += (A > X) ? fentry->jt : fentry->jf; 253 fentry += (A > X) ? fentry->jt : fentry->jf;
203 continue; 254 continue;
204 case BPF_S_JMP_JGE_X: 255 case BPF_S_JMP_JGE_X:
205 pc += (A >= X) ? fentry->jt : fentry->jf; 256 fentry += (A >= X) ? fentry->jt : fentry->jf;
206 continue; 257 continue;
207 case BPF_S_JMP_JEQ_X: 258 case BPF_S_JMP_JEQ_X:
208 pc += (A == X) ? fentry->jt : fentry->jf; 259 fentry += (A == X) ? fentry->jt : fentry->jf;
209 continue; 260 continue;
210 case BPF_S_JMP_JSET_X: 261 case BPF_S_JMP_JSET_X:
211 pc += (A & X) ? fentry->jt : fentry->jf; 262 fentry += (A & X) ? fentry->jt : fentry->jf;
212 continue; 263 continue;
213 case BPF_S_LD_W_ABS: 264 case BPF_S_LD_W_ABS:
214 k = f_k; 265 k = K;
215load_w: 266load_w:
216 ptr = load_pointer(skb, k, 4, &tmp); 267 ptr = load_pointer(skb, k, 4, &tmp);
217 if (ptr != NULL) { 268 if (ptr != NULL) {
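
The interpreter hunks convert sk_run_filter() from indexing filter[pc] to walking a fentry pointer, so a relative jump is plain pointer arithmetic (fentry += K) and the per-iteration bounds check disappears; as the new kerneldoc notes, sk_chk_filter() already guarantees that jumps stay inside the program and that the last instruction is a RET. A toy interpreter in the same style, using an invented three-opcode ISA rather than BPF:

#include <stdio.h>
#include <stdint.h>

enum { T_ADD_K, T_JGT_K, T_RET_A };            /* toy ISA, not BPF */

struct tinsn { uint16_t code; uint8_t jt, jf; uint32_t k; };

/* Assumes the program was validated: jumps stay in range, last insn is RET. */
static uint32_t run(const struct tinsn *fentry, uint32_t A)
{
    for (;; fentry++) {
        const uint32_t K = fentry->k;

        switch (fentry->code) {
        case T_ADD_K:
            A += K;
            continue;
        case T_JGT_K:                          /* relative conditional jump */
            fentry += (A > K) ? fentry->jt : fentry->jf;
            continue;
        case T_RET_A:
            return A;
        }
    }
}

int main(void)
{
    /* if (A + 1 > 10) return A + 1 else return A + 101 */
    struct tinsn prog[] = {
        { T_ADD_K, 0, 0, 1   },
        { T_JGT_K, 1, 0, 10  },                /* true: skip the next insn */
        { T_ADD_K, 0, 0, 100 },
        { T_RET_A, 0, 0, 0   },
    };

    printf("%u %u\n", run(prog, 5), run(prog, 42));   /* prints: 106 43 */
    return 0;
}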
@@ -220,7 +271,7 @@ load_w:
220 } 271 }
221 break; 272 break;
222 case BPF_S_LD_H_ABS: 273 case BPF_S_LD_H_ABS:
223 k = f_k; 274 k = K;
224load_h: 275load_h:
225 ptr = load_pointer(skb, k, 2, &tmp); 276 ptr = load_pointer(skb, k, 2, &tmp);
226 if (ptr != NULL) { 277 if (ptr != NULL) {
@@ -229,7 +280,7 @@ load_h:
229 } 280 }
230 break; 281 break;
231 case BPF_S_LD_B_ABS: 282 case BPF_S_LD_B_ABS:
232 k = f_k; 283 k = K;
233load_b: 284load_b:
234 ptr = load_pointer(skb, k, 1, &tmp); 285 ptr = load_pointer(skb, k, 1, &tmp);
235 if (ptr != NULL) { 286 if (ptr != NULL) {
@@ -244,34 +295,32 @@ load_b:
244 X = skb->len; 295 X = skb->len;
245 continue; 296 continue;
246 case BPF_S_LD_W_IND: 297 case BPF_S_LD_W_IND:
247 k = X + f_k; 298 k = X + K;
248 goto load_w; 299 goto load_w;
249 case BPF_S_LD_H_IND: 300 case BPF_S_LD_H_IND:
250 k = X + f_k; 301 k = X + K;
251 goto load_h; 302 goto load_h;
252 case BPF_S_LD_B_IND: 303 case BPF_S_LD_B_IND:
253 k = X + f_k; 304 k = X + K;
254 goto load_b; 305 goto load_b;
255 case BPF_S_LDX_B_MSH: 306 case BPF_S_LDX_B_MSH:
256 ptr = load_pointer(skb, f_k, 1, &tmp); 307 ptr = load_pointer(skb, K, 1, &tmp);
257 if (ptr != NULL) { 308 if (ptr != NULL) {
258 X = (*(u8 *)ptr & 0xf) << 2; 309 X = (*(u8 *)ptr & 0xf) << 2;
259 continue; 310 continue;
260 } 311 }
261 return 0; 312 return 0;
262 case BPF_S_LD_IMM: 313 case BPF_S_LD_IMM:
263 A = f_k; 314 A = K;
264 continue; 315 continue;
265 case BPF_S_LDX_IMM: 316 case BPF_S_LDX_IMM:
266 X = f_k; 317 X = K;
267 continue; 318 continue;
268 case BPF_S_LD_MEM: 319 case BPF_S_LD_MEM:
269 A = (memvalid & (1UL << f_k)) ? 320 A = mem[K];
270 mem[f_k] : 0;
271 continue; 321 continue;
272 case BPF_S_LDX_MEM: 322 case BPF_S_LDX_MEM:
273 X = (memvalid & (1UL << f_k)) ? 323 X = mem[K];
274 mem[f_k] : 0;
275 continue; 324 continue;
276 case BPF_S_MISC_TAX: 325 case BPF_S_MISC_TAX:
277 X = A; 326 X = A;
@@ -280,16 +329,14 @@ load_b:
280 A = X; 329 A = X;
281 continue; 330 continue;
282 case BPF_S_RET_K: 331 case BPF_S_RET_K:
283 return f_k; 332 return K;
284 case BPF_S_RET_A: 333 case BPF_S_RET_A:
285 return A; 334 return A;
286 case BPF_S_ST: 335 case BPF_S_ST:
287 memvalid |= 1UL << f_k; 336 mem[K] = A;
288 mem[f_k] = A;
289 continue; 337 continue;
290 case BPF_S_STX: 338 case BPF_S_STX:
291 memvalid |= 1UL << f_k; 339 mem[K] = X;
292 mem[f_k] = X;
293 continue; 340 continue;
294 default: 341 default:
295 WARN_ON(1); 342 WARN_ON(1);
@@ -323,6 +370,12 @@ load_b:
323 return 0; 370 return 0;
324 A = skb->dev->type; 371 A = skb->dev->type;
325 continue; 372 continue;
373 case SKF_AD_RXHASH:
374 A = skb->rxhash;
375 continue;
376 case SKF_AD_CPU:
377 A = raw_smp_processor_id();
378 continue;
326 case SKF_AD_NLATTR: { 379 case SKF_AD_NLATTR: {
327 struct nlattr *nla; 380 struct nlattr *nla;
328 381
@@ -367,6 +420,66 @@ load_b:
367} 420}
368EXPORT_SYMBOL(sk_run_filter); 421EXPORT_SYMBOL(sk_run_filter);
369 422
423/*
424 * Security :
425 * A BPF program is able to use 16 cells of memory to store intermediate
426 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter())
427 * As we dont want to clear mem[] array for each packet going through
428 * sk_run_filter(), we check that filter loaded by user never try to read
429 * a cell if not previously written, and we check all branches to be sure
430 * a malicious user doesnt try to abuse us.
431 */
432static int check_load_and_stores(struct sock_filter *filter, int flen)
433{
434 u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
435 int pc, ret = 0;
436
437 BUILD_BUG_ON(BPF_MEMWORDS > 16);
438 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
439 if (!masks)
440 return -ENOMEM;
441 memset(masks, 0xff, flen * sizeof(*masks));
442
443 for (pc = 0; pc < flen; pc++) {
444 memvalid &= masks[pc];
445
446 switch (filter[pc].code) {
447 case BPF_S_ST:
448 case BPF_S_STX:
449 memvalid |= (1 << filter[pc].k);
450 break;
451 case BPF_S_LD_MEM:
452 case BPF_S_LDX_MEM:
453 if (!(memvalid & (1 << filter[pc].k))) {
454 ret = -EINVAL;
455 goto error;
456 }
457 break;
458 case BPF_S_JMP_JA:
459 /* a jump must set masks on target */
460 masks[pc + 1 + filter[pc].k] &= memvalid;
461 memvalid = ~0;
462 break;
463 case BPF_S_JMP_JEQ_K:
464 case BPF_S_JMP_JEQ_X:
465 case BPF_S_JMP_JGE_K:
466 case BPF_S_JMP_JGE_X:
467 case BPF_S_JMP_JGT_K:
468 case BPF_S_JMP_JGT_X:
469 case BPF_S_JMP_JSET_X:
470 case BPF_S_JMP_JSET_K:
471 /* a jump must set masks on targets */
472 masks[pc + 1 + filter[pc].jt] &= memvalid;
473 masks[pc + 1 + filter[pc].jf] &= memvalid;
474 memvalid = ~0;
475 break;
476 }
477 }
478error:
479 kfree(masks);
480 return ret;
481}
482
370/** 483/**
371 * sk_chk_filter - verify socket filter code 484 * sk_chk_filter - verify socket filter code
372 * @filter: filter to verify 485 * @filter: filter to verify
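
check_load_and_stores() does at attach time what the interpreter used to do per packet: it carries a 16-bit written-cells mask down the instruction stream, ANDs it into the mask of every jump target, and rejects any load from a scratch cell that is not written on every path, which is why the LD_MEM/ST hunks above could drop the memvalid bookkeeping. The core of that dataflow check on a toy instruction stream (opcodes invented for the sketch, not the BPF encoding):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

enum { OP_ST, OP_LD, OP_JA, OP_RET };          /* toy opcodes, not BPF */

struct insn { int code; int k; };              /* k: cell index or jump offset */

/* Reject any LD from a scratch cell that is not written on every path. */
static int check_loads_and_stores(const struct insn *f, int flen)
{
    uint16_t *masks, memvalid = 0;
    int pc, ret = 0;

    masks = malloc(flen * sizeof(*masks));
    if (!masks)
        return -1;
    memset(masks, 0xff, flen * sizeof(*masks));

    for (pc = 0; pc < flen; pc++) {
        memvalid &= masks[pc];
        switch (f[pc].code) {
        case OP_ST:
            memvalid |= 1u << f[pc].k;
            break;
        case OP_LD:
            if (!(memvalid & (1u << f[pc].k)))
                ret = -1;                      /* read before write */
            break;
        case OP_JA:                            /* jump must carry the mask */
            masks[pc + 1 + f[pc].k] &= memvalid;
            memvalid = 0xffff;
            break;
        }
    }
    free(masks);
    return ret;
}

int main(void)
{
    struct insn good[] = { {OP_ST, 3}, {OP_LD, 3}, {OP_RET, 0} };
    struct insn bad[]  = { {OP_JA, 1}, {OP_ST, 3}, {OP_LD, 3}, {OP_RET, 0} };

    printf("good: %d\n", check_loads_and_stores(good, 3));  /* 0  */
    printf("bad:  %d\n", check_loads_and_stores(bad, 4));   /* -1 */
    return 0;
}

In the second program the only way to reach the load is over the jump, which bypasses the store, so the checker rejects it.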
@@ -383,7 +496,57 @@ EXPORT_SYMBOL(sk_run_filter);
383 */ 496 */
384int sk_chk_filter(struct sock_filter *filter, int flen) 497int sk_chk_filter(struct sock_filter *filter, int flen)
385{ 498{
386 struct sock_filter *ftest; 499 /*
500 * Valid instructions are initialized to non-0.
501 * Invalid instructions are initialized to 0.
502 */
503 static const u8 codes[] = {
504 [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
505 [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
506 [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
507 [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
508 [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
509 [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
510 [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
511 [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
512 [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
513 [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
514 [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
515 [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
516 [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
517 [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
518 [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
519 [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
520 [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
521 [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
522 [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
523 [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
524 [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
525 [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
526 [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
527 [BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
528 [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
529 [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
530 [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
531 [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
532 [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
533 [BPF_RET|BPF_K] = BPF_S_RET_K,
534 [BPF_RET|BPF_A] = BPF_S_RET_A,
535 [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
536 [BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
537 [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
538 [BPF_ST] = BPF_S_ST,
539 [BPF_STX] = BPF_S_STX,
540 [BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
541 [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
542 [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
543 [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
544 [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
545 [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
546 [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
547 [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
548 [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
549 };
387 int pc; 550 int pc;
388 551
389 if (flen == 0 || flen > BPF_MAXINSNS) 552 if (flen == 0 || flen > BPF_MAXINSNS)
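
sk_chk_filter() now validates and translates opcodes through a designated-initializer table instead of the long switch: the internal codes in the enum added at the top of filter.c start at 1, so a 0 entry (any hole left by the initializer) means invalid, and anything past the end of the table is rejected outright. The shape of that table-driven validation with a cut-down opcode set for the sketch:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Raw (user-visible) opcodes, sparse on purpose. */
enum { OP_RET_K = 0x06, OP_LD_MEM = 0x60, OP_ST = 0x02 };

/* Translated codes start at 1 so that 0 can mean "invalid". */
enum { S_INVALID = 0, S_RET_K, S_LD_MEM, S_ST };

static int translate(uint16_t code)
{
    static const uint8_t codes[] = {
        [OP_RET_K]  = S_RET_K,
        [OP_LD_MEM] = S_LD_MEM,
        [OP_ST]     = S_ST,
    };

    if (code >= ARRAY_SIZE(codes))
        return -1;
    return codes[code] ? codes[code] : -1;     /* holes map to 0 == invalid */
}

int main(void)
{
    printf("%d %d %d\n", translate(OP_LD_MEM), translate(0x05), translate(0x400));
    /* prints: 2 -1 -1  (valid, a hole in the table, out of range) */
    return 0;
}

Only the handful of opcodes that need extra checks (division by zero, scratch-cell bounds, jump ranges) keep explicit cases after the table lookup.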
@@ -391,136 +554,31 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
391 554
392 /* check the filter code now */ 555 /* check the filter code now */
393 for (pc = 0; pc < flen; pc++) { 556 for (pc = 0; pc < flen; pc++) {
394 ftest = &filter[pc]; 557 struct sock_filter *ftest = &filter[pc];
395 558 u16 code = ftest->code;
396 /* Only allow valid instructions */
397 switch (ftest->code) {
398 case BPF_ALU|BPF_ADD|BPF_K:
399 ftest->code = BPF_S_ALU_ADD_K;
400 break;
401 case BPF_ALU|BPF_ADD|BPF_X:
402 ftest->code = BPF_S_ALU_ADD_X;
403 break;
404 case BPF_ALU|BPF_SUB|BPF_K:
405 ftest->code = BPF_S_ALU_SUB_K;
406 break;
407 case BPF_ALU|BPF_SUB|BPF_X:
408 ftest->code = BPF_S_ALU_SUB_X;
409 break;
410 case BPF_ALU|BPF_MUL|BPF_K:
411 ftest->code = BPF_S_ALU_MUL_K;
412 break;
413 case BPF_ALU|BPF_MUL|BPF_X:
414 ftest->code = BPF_S_ALU_MUL_X;
415 break;
416 case BPF_ALU|BPF_DIV|BPF_X:
417 ftest->code = BPF_S_ALU_DIV_X;
418 break;
419 case BPF_ALU|BPF_AND|BPF_K:
420 ftest->code = BPF_S_ALU_AND_K;
421 break;
422 case BPF_ALU|BPF_AND|BPF_X:
423 ftest->code = BPF_S_ALU_AND_X;
424 break;
425 case BPF_ALU|BPF_OR|BPF_K:
426 ftest->code = BPF_S_ALU_OR_K;
427 break;
428 case BPF_ALU|BPF_OR|BPF_X:
429 ftest->code = BPF_S_ALU_OR_X;
430 break;
431 case BPF_ALU|BPF_LSH|BPF_K:
432 ftest->code = BPF_S_ALU_LSH_K;
433 break;
434 case BPF_ALU|BPF_LSH|BPF_X:
435 ftest->code = BPF_S_ALU_LSH_X;
436 break;
437 case BPF_ALU|BPF_RSH|BPF_K:
438 ftest->code = BPF_S_ALU_RSH_K;
439 break;
440 case BPF_ALU|BPF_RSH|BPF_X:
441 ftest->code = BPF_S_ALU_RSH_X;
442 break;
443 case BPF_ALU|BPF_NEG:
444 ftest->code = BPF_S_ALU_NEG;
445 break;
446 case BPF_LD|BPF_W|BPF_ABS:
447 ftest->code = BPF_S_LD_W_ABS;
448 break;
449 case BPF_LD|BPF_H|BPF_ABS:
450 ftest->code = BPF_S_LD_H_ABS;
451 break;
452 case BPF_LD|BPF_B|BPF_ABS:
453 ftest->code = BPF_S_LD_B_ABS;
454 break;
455 case BPF_LD|BPF_W|BPF_LEN:
456 ftest->code = BPF_S_LD_W_LEN;
457 break;
458 case BPF_LD|BPF_W|BPF_IND:
459 ftest->code = BPF_S_LD_W_IND;
460 break;
461 case BPF_LD|BPF_H|BPF_IND:
462 ftest->code = BPF_S_LD_H_IND;
463 break;
464 case BPF_LD|BPF_B|BPF_IND:
465 ftest->code = BPF_S_LD_B_IND;
466 break;
467 case BPF_LD|BPF_IMM:
468 ftest->code = BPF_S_LD_IMM;
469 break;
470 case BPF_LDX|BPF_W|BPF_LEN:
471 ftest->code = BPF_S_LDX_W_LEN;
472 break;
473 case BPF_LDX|BPF_B|BPF_MSH:
474 ftest->code = BPF_S_LDX_B_MSH;
475 break;
476 case BPF_LDX|BPF_IMM:
477 ftest->code = BPF_S_LDX_IMM;
478 break;
479 case BPF_MISC|BPF_TAX:
480 ftest->code = BPF_S_MISC_TAX;
481 break;
482 case BPF_MISC|BPF_TXA:
483 ftest->code = BPF_S_MISC_TXA;
484 break;
485 case BPF_RET|BPF_K:
486 ftest->code = BPF_S_RET_K;
487 break;
488 case BPF_RET|BPF_A:
489 ftest->code = BPF_S_RET_A;
490 break;
491 559
560 if (code >= ARRAY_SIZE(codes))
561 return -EINVAL;
562 code = codes[code];
563 if (!code)
564 return -EINVAL;
492 /* Some instructions need special checks */ 565 /* Some instructions need special checks */
493 566 switch (code) {
567 case BPF_S_ALU_DIV_K:
494 /* check for division by zero */ 568 /* check for division by zero */
495 case BPF_ALU|BPF_DIV|BPF_K:
496 if (ftest->k == 0) 569 if (ftest->k == 0)
497 return -EINVAL; 570 return -EINVAL;
498 ftest->code = BPF_S_ALU_DIV_K; 571 ftest->k = reciprocal_value(ftest->k);
499 break;
500
501 /* check for invalid memory addresses */
502 case BPF_LD|BPF_MEM:
503 if (ftest->k >= BPF_MEMWORDS)
504 return -EINVAL;
505 ftest->code = BPF_S_LD_MEM;
506 break;
507 case BPF_LDX|BPF_MEM:
508 if (ftest->k >= BPF_MEMWORDS)
509 return -EINVAL;
510 ftest->code = BPF_S_LDX_MEM;
511 break;
512 case BPF_ST:
513 if (ftest->k >= BPF_MEMWORDS)
514 return -EINVAL;
515 ftest->code = BPF_S_ST;
516 break; 572 break;
517 case BPF_STX: 573 case BPF_S_LD_MEM:
574 case BPF_S_LDX_MEM:
575 case BPF_S_ST:
576 case BPF_S_STX:
577 /* check for invalid memory addresses */
518 if (ftest->k >= BPF_MEMWORDS) 578 if (ftest->k >= BPF_MEMWORDS)
519 return -EINVAL; 579 return -EINVAL;
520 ftest->code = BPF_S_STX;
521 break; 580 break;
522 581 case BPF_S_JMP_JA:
523 case BPF_JMP|BPF_JA:
524 /* 582 /*
525 * Note, the large ftest->k might cause loops. 583 * Note, the large ftest->k might cause loops.
526 * Compare this with conditional jumps below, 584 * Compare this with conditional jumps below,
@@ -528,40 +586,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
528 */ 586 */
529 if (ftest->k >= (unsigned)(flen-pc-1)) 587 if (ftest->k >= (unsigned)(flen-pc-1))
530 return -EINVAL; 588 return -EINVAL;
531 ftest->code = BPF_S_JMP_JA;
532 break;
533
534 case BPF_JMP|BPF_JEQ|BPF_K:
535 ftest->code = BPF_S_JMP_JEQ_K;
536 break;
537 case BPF_JMP|BPF_JEQ|BPF_X:
538 ftest->code = BPF_S_JMP_JEQ_X;
539 break;
540 case BPF_JMP|BPF_JGE|BPF_K:
541 ftest->code = BPF_S_JMP_JGE_K;
542 break;
543 case BPF_JMP|BPF_JGE|BPF_X:
544 ftest->code = BPF_S_JMP_JGE_X;
545 break;
546 case BPF_JMP|BPF_JGT|BPF_K:
547 ftest->code = BPF_S_JMP_JGT_K;
548 break;
549 case BPF_JMP|BPF_JGT|BPF_X:
550 ftest->code = BPF_S_JMP_JGT_X;
551 break;
552 case BPF_JMP|BPF_JSET|BPF_K:
553 ftest->code = BPF_S_JMP_JSET_K;
554 break; 589 break;
555 case BPF_JMP|BPF_JSET|BPF_X:
556 ftest->code = BPF_S_JMP_JSET_X;
557 break;
558
559 default:
560 return -EINVAL;
561 }
562
563 /* for conditionals both must be safe */
564 switch (ftest->code) {
565 case BPF_S_JMP_JEQ_K: 590 case BPF_S_JMP_JEQ_K:
566 case BPF_S_JMP_JEQ_X: 591 case BPF_S_JMP_JEQ_X:
567 case BPF_S_JMP_JGE_K: 592 case BPF_S_JMP_JGE_K:
@@ -570,21 +595,22 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
570 case BPF_S_JMP_JGT_X: 595 case BPF_S_JMP_JGT_X:
571 case BPF_S_JMP_JSET_X: 596 case BPF_S_JMP_JSET_X:
572 case BPF_S_JMP_JSET_K: 597 case BPF_S_JMP_JSET_K:
598 /* for conditionals both must be safe */
573 if (pc + ftest->jt + 1 >= flen || 599 if (pc + ftest->jt + 1 >= flen ||
574 pc + ftest->jf + 1 >= flen) 600 pc + ftest->jf + 1 >= flen)
575 return -EINVAL; 601 return -EINVAL;
602 break;
576 } 603 }
604 ftest->code = code;
577 } 605 }
578 606
579 /* last instruction must be a RET code */ 607 /* last instruction must be a RET code */
580 switch (filter[flen - 1].code) { 608 switch (filter[flen - 1].code) {
581 case BPF_S_RET_K: 609 case BPF_S_RET_K:
582 case BPF_S_RET_A: 610 case BPF_S_RET_A:
583 return 0; 611 return check_load_and_stores(filter, flen);
584 break; 612 }
585 default: 613 return -EINVAL;
586 return -EINVAL;
587 }
588} 614}
589EXPORT_SYMBOL(sk_chk_filter); 615EXPORT_SYMBOL(sk_chk_filter);
590 616
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7f902cad10f..85e8b5326dd 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -706,7 +706,6 @@ static struct attribute *rx_queue_default_attrs[] = {
706static void rx_queue_release(struct kobject *kobj) 706static void rx_queue_release(struct kobject *kobj)
707{ 707{
708 struct netdev_rx_queue *queue = to_rx_queue(kobj); 708 struct netdev_rx_queue *queue = to_rx_queue(kobj);
709 struct netdev_rx_queue *first = queue->first;
710 struct rps_map *map; 709 struct rps_map *map;
711 struct rps_dev_flow_table *flow_table; 710 struct rps_dev_flow_table *flow_table;
712 711
@@ -723,10 +722,8 @@ static void rx_queue_release(struct kobject *kobj)
723 call_rcu(&flow_table->rcu, rps_dev_flow_table_release); 722 call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
724 } 723 }
725 724
726 if (atomic_dec_and_test(&first->count)) 725 memset(kobj, 0, sizeof(*kobj));
727 kfree(first); 726 dev_put(queue->dev);
728 else
729 memset(kobj, 0, sizeof(*kobj));
730} 727}
731 728
732static struct kobj_type rx_queue_ktype = { 729static struct kobj_type rx_queue_ktype = {
@@ -738,7 +735,6 @@ static struct kobj_type rx_queue_ktype = {
738static int rx_queue_add_kobject(struct net_device *net, int index) 735static int rx_queue_add_kobject(struct net_device *net, int index)
739{ 736{
740 struct netdev_rx_queue *queue = net->_rx + index; 737 struct netdev_rx_queue *queue = net->_rx + index;
741 struct netdev_rx_queue *first = queue->first;
742 struct kobject *kobj = &queue->kobj; 738 struct kobject *kobj = &queue->kobj;
743 int error = 0; 739 int error = 0;
744 740
@@ -751,14 +747,16 @@ static int rx_queue_add_kobject(struct net_device *net, int index)
751 } 747 }
752 748
753 kobject_uevent(kobj, KOBJ_ADD); 749 kobject_uevent(kobj, KOBJ_ADD);
754 atomic_inc(&first->count); 750 dev_hold(queue->dev);
755 751
756 return error; 752 return error;
757} 753}
754#endif /* CONFIG_RPS */
758 755
759int 756int
760net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num) 757net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
761{ 758{
759#ifdef CONFIG_RPS
762 int i; 760 int i;
763 int error = 0; 761 int error = 0;
764 762
@@ -774,23 +772,422 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
774 kobject_put(&net->_rx[i].kobj); 772 kobject_put(&net->_rx[i].kobj);
775 773
776 return error; 774 return error;
775#else
776 return 0;
777#endif
778}
779
780#ifdef CONFIG_XPS
781/*
782 * netdev_queue sysfs structures and functions.
783 */
784struct netdev_queue_attribute {
785 struct attribute attr;
786 ssize_t (*show)(struct netdev_queue *queue,
787 struct netdev_queue_attribute *attr, char *buf);
788 ssize_t (*store)(struct netdev_queue *queue,
789 struct netdev_queue_attribute *attr, const char *buf, size_t len);
790};
791#define to_netdev_queue_attr(_attr) container_of(_attr, \
792 struct netdev_queue_attribute, attr)
793
794#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
795
796static ssize_t netdev_queue_attr_show(struct kobject *kobj,
797 struct attribute *attr, char *buf)
798{
799 struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
800 struct netdev_queue *queue = to_netdev_queue(kobj);
801
802 if (!attribute->show)
803 return -EIO;
804
805 return attribute->show(queue, attribute, buf);
777} 806}
778 807
779static int rx_queue_register_kobjects(struct net_device *net) 808static ssize_t netdev_queue_attr_store(struct kobject *kobj,
809 struct attribute *attr,
810 const char *buf, size_t count)
780{ 811{
812 struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
813 struct netdev_queue *queue = to_netdev_queue(kobj);
814
815 if (!attribute->store)
816 return -EIO;
817
818 return attribute->store(queue, attribute, buf, count);
819}
820
821static const struct sysfs_ops netdev_queue_sysfs_ops = {
822 .show = netdev_queue_attr_show,
823 .store = netdev_queue_attr_store,
824};
825
826static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
827{
828 struct net_device *dev = queue->dev;
829 int i;
830
831 for (i = 0; i < dev->num_tx_queues; i++)
832 if (queue == &dev->_tx[i])
833 break;
834
835 BUG_ON(i >= dev->num_tx_queues);
836
837 return i;
838}
839
840
841static ssize_t show_xps_map(struct netdev_queue *queue,
842 struct netdev_queue_attribute *attribute, char *buf)
843{
844 struct net_device *dev = queue->dev;
845 struct xps_dev_maps *dev_maps;
846 cpumask_var_t mask;
847 unsigned long index;
848 size_t len = 0;
849 int i;
850
851 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
852 return -ENOMEM;
853
854 index = get_netdev_queue_index(queue);
855
856 rcu_read_lock();
857 dev_maps = rcu_dereference(dev->xps_maps);
858 if (dev_maps) {
859 for_each_possible_cpu(i) {
860 struct xps_map *map =
861 rcu_dereference(dev_maps->cpu_map[i]);
862 if (map) {
863 int j;
864 for (j = 0; j < map->len; j++) {
865 if (map->queues[j] == index) {
866 cpumask_set_cpu(i, mask);
867 break;
868 }
869 }
870 }
871 }
872 }
873 rcu_read_unlock();
874
875 len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
876 if (PAGE_SIZE - len < 3) {
877 free_cpumask_var(mask);
878 return -EINVAL;
879 }
880
881 free_cpumask_var(mask);
882 len += sprintf(buf + len, "\n");
883 return len;
884}
885
886static void xps_map_release(struct rcu_head *rcu)
887{
888 struct xps_map *map = container_of(rcu, struct xps_map, rcu);
889
890 kfree(map);
891}
892
893static void xps_dev_maps_release(struct rcu_head *rcu)
894{
895 struct xps_dev_maps *dev_maps =
896 container_of(rcu, struct xps_dev_maps, rcu);
897
898 kfree(dev_maps);
899}
900
901static DEFINE_MUTEX(xps_map_mutex);
902#define xmap_dereference(P) \
903 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
904
905static ssize_t store_xps_map(struct netdev_queue *queue,
906 struct netdev_queue_attribute *attribute,
907 const char *buf, size_t len)
908{
909 struct net_device *dev = queue->dev;
910 cpumask_var_t mask;
911 int err, i, cpu, pos, map_len, alloc_len, need_set;
912 unsigned long index;
913 struct xps_map *map, *new_map;
914 struct xps_dev_maps *dev_maps, *new_dev_maps;
915 int nonempty = 0;
916 int numa_node = -2;
917
918 if (!capable(CAP_NET_ADMIN))
919 return -EPERM;
920
921 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
922 return -ENOMEM;
923
924 index = get_netdev_queue_index(queue);
925
926 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
927 if (err) {
928 free_cpumask_var(mask);
929 return err;
930 }
931
932 new_dev_maps = kzalloc(max_t(unsigned,
933 XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
934 if (!new_dev_maps) {
935 free_cpumask_var(mask);
936 return -ENOMEM;
937 }
938
939 mutex_lock(&xps_map_mutex);
940
941 dev_maps = xmap_dereference(dev->xps_maps);
942
943 for_each_possible_cpu(cpu) {
944 map = dev_maps ?
945 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
946 new_map = map;
947 if (map) {
948 for (pos = 0; pos < map->len; pos++)
949 if (map->queues[pos] == index)
950 break;
951 map_len = map->len;
952 alloc_len = map->alloc_len;
953 } else
954 pos = map_len = alloc_len = 0;
955
956 need_set = cpu_isset(cpu, *mask) && cpu_online(cpu);
957#ifdef CONFIG_NUMA
958 if (need_set) {
959 if (numa_node == -2)
960 numa_node = cpu_to_node(cpu);
961 else if (numa_node != cpu_to_node(cpu))
962 numa_node = -1;
963 }
964#endif
965 if (need_set && pos >= map_len) {
966 /* Need to add queue to this CPU's map */
967 if (map_len >= alloc_len) {
968 alloc_len = alloc_len ?
969 2 * alloc_len : XPS_MIN_MAP_ALLOC;
970 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
971 GFP_KERNEL,
972 cpu_to_node(cpu));
973 if (!new_map)
974 goto error;
975 new_map->alloc_len = alloc_len;
976 for (i = 0; i < map_len; i++)
977 new_map->queues[i] = map->queues[i];
978 new_map->len = map_len;
979 }
980 new_map->queues[new_map->len++] = index;
981 } else if (!need_set && pos < map_len) {
982 /* Need to remove queue from this CPU's map */
983 if (map_len > 1)
984 new_map->queues[pos] =
985 new_map->queues[--new_map->len];
986 else
987 new_map = NULL;
988 }
989 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
990 }
991
992 /* Cleanup old maps */
993 for_each_possible_cpu(cpu) {
994 map = dev_maps ?
995 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
996 if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
997 call_rcu(&map->rcu, xps_map_release);
998 if (new_dev_maps->cpu_map[cpu])
999 nonempty = 1;
1000 }
1001
1002 if (nonempty)
1003 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1004 else {
1005 kfree(new_dev_maps);
1006 rcu_assign_pointer(dev->xps_maps, NULL);
1007 }
1008
1009 if (dev_maps)
1010 call_rcu(&dev_maps->rcu, xps_dev_maps_release);
1011
1012 netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node : -1);
1013
1014 mutex_unlock(&xps_map_mutex);
1015
1016 free_cpumask_var(mask);
1017 return len;
1018
1019error:
1020 mutex_unlock(&xps_map_mutex);
1021
1022 if (new_dev_maps)
1023 for_each_possible_cpu(i)
1024 kfree(rcu_dereference_protected(
1025 new_dev_maps->cpu_map[i],
1026 1));
1027 kfree(new_dev_maps);
1028 free_cpumask_var(mask);
1029 return -ENOMEM;
1030}
1031
1032static struct netdev_queue_attribute xps_cpus_attribute =
1033 __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
1034
1035static struct attribute *netdev_queue_default_attrs[] = {
1036 &xps_cpus_attribute.attr,
1037 NULL
1038};
1039
1040static void netdev_queue_release(struct kobject *kobj)
1041{
1042 struct netdev_queue *queue = to_netdev_queue(kobj);
1043 struct net_device *dev = queue->dev;
1044 struct xps_dev_maps *dev_maps;
1045 struct xps_map *map;
1046 unsigned long index;
1047 int i, pos, nonempty = 0;
1048
1049 index = get_netdev_queue_index(queue);
1050
1051 mutex_lock(&xps_map_mutex);
1052 dev_maps = xmap_dereference(dev->xps_maps);
1053
1054 if (dev_maps) {
1055 for_each_possible_cpu(i) {
1056 map = xmap_dereference(dev_maps->cpu_map[i]);
1057 if (!map)
1058 continue;
1059
1060 for (pos = 0; pos < map->len; pos++)
1061 if (map->queues[pos] == index)
1062 break;
1063
1064 if (pos < map->len) {
1065 if (map->len > 1)
1066 map->queues[pos] =
1067 map->queues[--map->len];
1068 else {
1069 RCU_INIT_POINTER(dev_maps->cpu_map[i],
1070 NULL);
1071 call_rcu(&map->rcu, xps_map_release);
1072 map = NULL;
1073 }
1074 }
1075 if (map)
1076 nonempty = 1;
1077 }
1078
1079 if (!nonempty) {
1080 RCU_INIT_POINTER(dev->xps_maps, NULL);
1081 call_rcu(&dev_maps->rcu, xps_dev_maps_release);
1082 }
1083 }
1084
1085 mutex_unlock(&xps_map_mutex);
1086
1087 memset(kobj, 0, sizeof(*kobj));
1088 dev_put(queue->dev);
1089}
1090
1091static struct kobj_type netdev_queue_ktype = {
1092 .sysfs_ops = &netdev_queue_sysfs_ops,
1093 .release = netdev_queue_release,
1094 .default_attrs = netdev_queue_default_attrs,
1095};
1096
1097static int netdev_queue_add_kobject(struct net_device *net, int index)
1098{
1099 struct netdev_queue *queue = net->_tx + index;
1100 struct kobject *kobj = &queue->kobj;
1101 int error = 0;
1102
1103 kobj->kset = net->queues_kset;
1104 error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
1105 "tx-%u", index);
1106 if (error) {
1107 kobject_put(kobj);
1108 return error;
1109 }
1110
1111 kobject_uevent(kobj, KOBJ_ADD);
1112 dev_hold(queue->dev);
1113
1114 return error;
1115}
1116#endif /* CONFIG_XPS */
1117
1118int
1119netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
1120{
1121#ifdef CONFIG_XPS
1122 int i;
1123 int error = 0;
1124
1125 for (i = old_num; i < new_num; i++) {
1126 error = netdev_queue_add_kobject(net, i);
1127 if (error) {
1128 new_num = old_num;
1129 break;
1130 }
1131 }
1132
1133 while (--i >= new_num)
1134 kobject_put(&net->_tx[i].kobj);
1135
1136 return error;
1137#else
1138 return 0;
1139#endif
1140}
1141
1142static int register_queue_kobjects(struct net_device *net)
1143{
1144 int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
1145
1146#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
781 net->queues_kset = kset_create_and_add("queues", 1147 net->queues_kset = kset_create_and_add("queues",
782 NULL, &net->dev.kobj); 1148 NULL, &net->dev.kobj);
783 if (!net->queues_kset) 1149 if (!net->queues_kset)
784 return -ENOMEM; 1150 return -ENOMEM;
785 return net_rx_queue_update_kobjects(net, 0, net->real_num_rx_queues); 1151#endif
1152
1153#ifdef CONFIG_RPS
1154 real_rx = net->real_num_rx_queues;
1155#endif
1156 real_tx = net->real_num_tx_queues;
1157
1158 error = net_rx_queue_update_kobjects(net, 0, real_rx);
1159 if (error)
1160 goto error;
1161 rxq = real_rx;
1162
1163 error = netdev_queue_update_kobjects(net, 0, real_tx);
1164 if (error)
1165 goto error;
1166 txq = real_tx;
1167
1168 return 0;
1169
1170error:
1171 netdev_queue_update_kobjects(net, txq, 0);
1172 net_rx_queue_update_kobjects(net, rxq, 0);
1173 return error;
786} 1174}
787 1175
788static void rx_queue_remove_kobjects(struct net_device *net) 1176static void remove_queue_kobjects(struct net_device *net)
789{ 1177{
790 net_rx_queue_update_kobjects(net, net->real_num_rx_queues, 0); 1178 int real_rx = 0, real_tx = 0;
1179
1180#ifdef CONFIG_RPS
1181 real_rx = net->real_num_rx_queues;
1182#endif
1183 real_tx = net->real_num_tx_queues;
1184
1185 net_rx_queue_update_kobjects(net, real_rx, 0);
1186 netdev_queue_update_kobjects(net, real_tx, 0);
1187#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
791 kset_unregister(net->queues_kset); 1188 kset_unregister(net->queues_kset);
1189#endif
792} 1190}
793#endif /* CONFIG_RPS */
794 1191
795static const void *net_current_ns(void) 1192static const void *net_current_ns(void)
796{ 1193{
@@ -889,9 +1286,7 @@ void netdev_unregister_kobject(struct net_device * net)
889 1286
890 kobject_get(&dev->kobj); 1287 kobject_get(&dev->kobj);
891 1288
892#ifdef CONFIG_RPS 1289 remove_queue_kobjects(net);
893 rx_queue_remove_kobjects(net);
894#endif
895 1290
896 device_del(dev); 1291 device_del(dev);
897} 1292}
@@ -930,13 +1325,11 @@ int netdev_register_kobject(struct net_device *net)
930 if (error) 1325 if (error)
931 return error; 1326 return error;
932 1327
933#ifdef CONFIG_RPS 1328 error = register_queue_kobjects(net);
934 error = rx_queue_register_kobjects(net);
935 if (error) { 1329 if (error) {
936 device_del(dev); 1330 device_del(dev);
937 return error; 1331 return error;
938 } 1332 }
939#endif
940 1333
941 return error; 1334 return error;
942} 1335}
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 778e1571548..bd7751ec1c4 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -4,8 +4,8 @@
4int netdev_kobject_init(void); 4int netdev_kobject_init(void);
5int netdev_register_kobject(struct net_device *); 5int netdev_register_kobject(struct net_device *);
6void netdev_unregister_kobject(struct net_device *); 6void netdev_unregister_kobject(struct net_device *);
7#ifdef CONFIG_RPS
8int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num); 7int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num);
9#endif 8int netdev_queue_update_kobjects(struct net_device *net,
9 int old_num, int new_num);
10 10
11#endif 11#endif
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 4e98ffac3af..ee38acb6d46 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -76,8 +76,7 @@ static void queue_process(struct work_struct *work)
76 76
77 local_irq_save(flags); 77 local_irq_save(flags);
78 __netif_tx_lock(txq, smp_processor_id()); 78 __netif_tx_lock(txq, smp_processor_id());
79 if (netif_tx_queue_stopped(txq) || 79 if (netif_tx_queue_frozen_or_stopped(txq) ||
80 netif_tx_queue_frozen(txq) ||
81 ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) { 80 ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
82 skb_queue_head(&npinfo->txq, skb); 81 skb_queue_head(&npinfo->txq, skb);
83 __netif_tx_unlock(txq); 82 __netif_tx_unlock(txq);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 33bc3823ac6..2953b2abc97 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -378,6 +378,7 @@ struct pktgen_dev {
378 378
379 u16 queue_map_min; 379 u16 queue_map_min;
380 u16 queue_map_max; 380 u16 queue_map_max;
381 __u32 skb_priority; /* skb priority field */
381 int node; /* Memory node */ 382 int node; /* Memory node */
382 383
383#ifdef CONFIG_XFRM 384#ifdef CONFIG_XFRM
@@ -394,6 +395,8 @@ struct pktgen_hdr {
394 __be32 tv_usec; 395 __be32 tv_usec;
395}; 396};
396 397
398static bool pktgen_exiting __read_mostly;
399
397struct pktgen_thread { 400struct pktgen_thread {
398 spinlock_t if_lock; /* for list of devices */ 401 spinlock_t if_lock; /* for list of devices */
399 struct list_head if_list; /* All device here */ 402 struct list_head if_list; /* All device here */
@@ -547,6 +550,10 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
547 pkt_dev->queue_map_min, 550 pkt_dev->queue_map_min,
548 pkt_dev->queue_map_max); 551 pkt_dev->queue_map_max);
549 552
553 if (pkt_dev->skb_priority)
554 seq_printf(seq, " skb_priority: %u\n",
555 pkt_dev->skb_priority);
556
550 if (pkt_dev->flags & F_IPV6) { 557 if (pkt_dev->flags & F_IPV6) {
551 char b1[128], b2[128], b3[128]; 558 char b1[128], b2[128], b3[128];
552 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr); 559 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
@@ -1711,6 +1718,18 @@ static ssize_t pktgen_if_write(struct file *file,
1711 return count; 1718 return count;
1712 } 1719 }
1713 1720
1721 if (!strcmp(name, "skb_priority")) {
1722 len = num_arg(&user_buffer[i], 9, &value);
1723 if (len < 0)
1724 return len;
1725
1726 i += len;
1727 pkt_dev->skb_priority = value;
1728 sprintf(pg_result, "OK: skb_priority=%i",
1729 pkt_dev->skb_priority);
1730 return count;
1731 }
1732
1714 sprintf(pkt_dev->result, "No such parameter \"%s\"", name); 1733 sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
1715 return -EINVAL; 1734 return -EINVAL;
1716} 1735}
@@ -2671,6 +2690,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2671 skb->transport_header = skb->network_header + sizeof(struct iphdr); 2690 skb->transport_header = skb->network_header + sizeof(struct iphdr);
2672 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); 2691 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
2673 skb_set_queue_mapping(skb, queue_map); 2692 skb_set_queue_mapping(skb, queue_map);
2693 skb->priority = pkt_dev->skb_priority;
2694
2674 iph = ip_hdr(skb); 2695 iph = ip_hdr(skb);
2675 udph = udp_hdr(skb); 2696 udph = udp_hdr(skb);
2676 2697
@@ -3016,6 +3037,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
3016 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); 3037 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
3017 skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); 3038 skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
3018 skb_set_queue_mapping(skb, queue_map); 3039 skb_set_queue_mapping(skb, queue_map);
3040 skb->priority = pkt_dev->skb_priority;
3019 iph = ipv6_hdr(skb); 3041 iph = ipv6_hdr(skb);
3020 udph = udp_hdr(skb); 3042 udph = udp_hdr(skb);
3021 3043
@@ -3431,11 +3453,6 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
3431 3453
3432 remove_proc_entry(t->tsk->comm, pg_proc_dir); 3454 remove_proc_entry(t->tsk->comm, pg_proc_dir);
3433 3455
3434 mutex_lock(&pktgen_thread_lock);
3435
3436 list_del(&t->th_list);
3437
3438 mutex_unlock(&pktgen_thread_lock);
3439} 3456}
3440 3457
3441static void pktgen_resched(struct pktgen_dev *pkt_dev) 3458static void pktgen_resched(struct pktgen_dev *pkt_dev)
@@ -3510,7 +3527,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3510 3527
3511 __netif_tx_lock_bh(txq); 3528 __netif_tx_lock_bh(txq);
3512 3529
3513 if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) { 3530 if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
3514 ret = NETDEV_TX_BUSY; 3531 ret = NETDEV_TX_BUSY;
3515 pkt_dev->last_ok = 0; 3532 pkt_dev->last_ok = 0;
3516 goto unlock; 3533 goto unlock;
@@ -3582,6 +3599,8 @@ static int pktgen_thread_worker(void *arg)
3582 pkt_dev = next_to_run(t); 3599 pkt_dev = next_to_run(t);
3583 3600
3584 if (unlikely(!pkt_dev && t->control == 0)) { 3601 if (unlikely(!pkt_dev && t->control == 0)) {
3602 if (pktgen_exiting)
3603 break;
3585 wait_event_interruptible_timeout(t->queue, 3604 wait_event_interruptible_timeout(t->queue,
3586 t->control != 0, 3605 t->control != 0,
3587 HZ/10); 3606 HZ/10);
@@ -3634,6 +3653,13 @@ static int pktgen_thread_worker(void *arg)
3634 pr_debug("%s removing thread\n", t->tsk->comm); 3653 pr_debug("%s removing thread\n", t->tsk->comm);
3635 pktgen_rem_thread(t); 3654 pktgen_rem_thread(t);
3636 3655
3656 /* Wait for kthread_stop */
3657 while (!kthread_should_stop()) {
3658 set_current_state(TASK_INTERRUPTIBLE);
3659 schedule();
3660 }
3661 __set_current_state(TASK_RUNNING);
3662
3637 return 0; 3663 return 0;
3638} 3664}
3639 3665
@@ -3908,6 +3934,7 @@ static void __exit pg_cleanup(void)
3908 struct list_head *q, *n; 3934 struct list_head *q, *n;
3909 3935
3910 /* Stop all interfaces & threads */ 3936 /* Stop all interfaces & threads */
3937 pktgen_exiting = true;
3911 3938
3912 list_for_each_safe(q, n, &pktgen_threads) { 3939 list_for_each_safe(q, n, &pktgen_threads) {
3913 t = list_entry(q, struct pktgen_thread, th_list); 3940 t = list_entry(q, struct pktgen_thread, th_list);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index fceeb37d716..182236b2510 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -33,6 +33,7 @@
33 * Note : Don't forget somaxconn that may limit backlog too. 33 * Note : Don't forget somaxconn that may limit backlog too.
34 */ 34 */
35int sysctl_max_syn_backlog = 256; 35int sysctl_max_syn_backlog = 256;
36EXPORT_SYMBOL(sysctl_max_syn_backlog);
36 37
37int reqsk_queue_alloc(struct request_sock_queue *queue, 38int reqsk_queue_alloc(struct request_sock_queue *queue,
38 unsigned int nr_table_entries) 39 unsigned int nr_table_entries)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 841c287ef40..750db57f3bb 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -362,6 +362,95 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
362 return size; 362 return size;
363} 363}
364 364
365static LIST_HEAD(rtnl_af_ops);
366
367static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
368{
369 const struct rtnl_af_ops *ops;
370
371 list_for_each_entry(ops, &rtnl_af_ops, list) {
372 if (ops->family == family)
373 return ops;
374 }
375
376 return NULL;
377}
378
379/**
380 * __rtnl_af_register - Register rtnl_af_ops with rtnetlink.
381 * @ops: struct rtnl_af_ops * to register
382 *
383 * The caller must hold the rtnl_mutex.
384 *
385 * Returns 0 on success or a negative error code.
386 */
387int __rtnl_af_register(struct rtnl_af_ops *ops)
388{
389 list_add_tail(&ops->list, &rtnl_af_ops);
390 return 0;
391}
392EXPORT_SYMBOL_GPL(__rtnl_af_register);
393
394/**
395 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
396 * @ops: struct rtnl_af_ops * to register
397 *
398 * Returns 0 on success or a negative error code.
399 */
400int rtnl_af_register(struct rtnl_af_ops *ops)
401{
402 int err;
403
404 rtnl_lock();
405 err = __rtnl_af_register(ops);
406 rtnl_unlock();
407 return err;
408}
409EXPORT_SYMBOL_GPL(rtnl_af_register);
410
411/**
412 * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
413 * @ops: struct rtnl_af_ops * to unregister
414 *
415 * The caller must hold the rtnl_mutex.
416 */
417void __rtnl_af_unregister(struct rtnl_af_ops *ops)
418{
419 list_del(&ops->list);
420}
421EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
422
423/**
424 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
425 * @ops: struct rtnl_af_ops * to unregister
426 */
427void rtnl_af_unregister(struct rtnl_af_ops *ops)
428{
429 rtnl_lock();
430 __rtnl_af_unregister(ops);
431 rtnl_unlock();
432}
433EXPORT_SYMBOL_GPL(rtnl_af_unregister);
434
435static size_t rtnl_link_get_af_size(const struct net_device *dev)
436{
437 struct rtnl_af_ops *af_ops;
438 size_t size;
439
440 /* IFLA_AF_SPEC */
441 size = nla_total_size(sizeof(struct nlattr));
442
443 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
444 if (af_ops->get_link_af_size) {
445 /* AF_* + nested data */
446 size += nla_total_size(sizeof(struct nlattr)) +
447 af_ops->get_link_af_size(dev);
448 }
449 }
450
451 return size;
452}
453
365static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev) 454static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
366{ 455{
367 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 456 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
@@ -671,7 +760,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
671 + nla_total_size(4) /* IFLA_NUM_VF */ 760 + nla_total_size(4) /* IFLA_NUM_VF */
672 + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */ 761 + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
673 + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ 762 + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
674 + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ 763 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
764 + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
675} 765}
676 766
677static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) 767static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -757,7 +847,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
757 struct nlmsghdr *nlh; 847 struct nlmsghdr *nlh;
758 struct rtnl_link_stats64 temp; 848 struct rtnl_link_stats64 temp;
759 const struct rtnl_link_stats64 *stats; 849 const struct rtnl_link_stats64 *stats;
760 struct nlattr *attr; 850 struct nlattr *attr, *af_spec;
851 struct rtnl_af_ops *af_ops;
761 852
762 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 853 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
763 if (nlh == NULL) 854 if (nlh == NULL)
@@ -866,6 +957,36 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
866 goto nla_put_failure; 957 goto nla_put_failure;
867 } 958 }
868 959
960 if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
961 goto nla_put_failure;
962
963 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
964 if (af_ops->fill_link_af) {
965 struct nlattr *af;
966 int err;
967
968 if (!(af = nla_nest_start(skb, af_ops->family)))
969 goto nla_put_failure;
970
971 err = af_ops->fill_link_af(skb, dev);
972
973 /*
974 * Caller may return ENODATA to indicate that there
975 * was no data to be dumped. This is not an error; it
976 * means we should trim the attribute header and
977 * continue.
978 */
979 if (err == -ENODATA)
980 nla_nest_cancel(skb, af);
981 else if (err < 0)
982 goto nla_put_failure;
983
984 nla_nest_end(skb, af);
985 }
986 }
987
988 nla_nest_end(skb, af_spec);
989
869 return nlmsg_end(skb, nlh); 990 return nlmsg_end(skb, nlh);
870 991
871nla_put_failure: 992nla_put_failure:
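
The comment in the hunk above spells out the fill_link_af() convention: a per-family callback with nothing to dump returns -ENODATA, and the empty nest is trimmed rather than treated as a failure. A standalone user-space sketch of that start/cancel/end pattern follows; tlv_buf, nest_start(), nest_end(), nest_cancel() and fill_af() are hand-rolled stand-ins, not the netlink attribute API.

#include <errno.h>
#include <stdio.h>

struct tlv_buf {
	unsigned char data[64];
	int len;
};

/* Write a type byte plus a length placeholder; return the header offset. */
static int nest_start(struct tlv_buf *b, unsigned char type)
{
	int hdr = b->len;

	b->data[b->len++] = type;
	b->data[b->len++] = 0;          /* patched by nest_end() */
	return hdr;
}

static void nest_end(struct tlv_buf *b, int hdr)
{
	b->data[hdr + 1] = (unsigned char)(b->len - hdr);
}

static void nest_cancel(struct tlv_buf *b, int hdr)
{
	b->len = hdr;                   /* drop the half-written nest */
}

/* Stand-in for a per-family fill callback that has nothing to report. */
static int fill_af(struct tlv_buf *b)
{
	(void)b;
	return -ENODATA;
}

int main(void)
{
	struct tlv_buf b = { .len = 0 };
	int hdr = nest_start(&b, 2);    /* 2: arbitrary family code */
	int err = fill_af(&b);

	if (err == -ENODATA)
		nest_cancel(&b, hdr);   /* empty nest: trim, do not fail */
	else if (err == 0)
		nest_end(&b, hdr);
	printf("buffer length: %d\n", b.len);   /* 0: nothing was kept */
	return 0;
}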
@@ -924,6 +1045,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
924 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, 1045 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
925 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 1046 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
926 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 1047 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1048 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
927}; 1049};
928EXPORT_SYMBOL(ifla_policy); 1050EXPORT_SYMBOL(ifla_policy);
929 1051
@@ -985,6 +1107,28 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
985 return -EINVAL; 1107 return -EINVAL;
986 } 1108 }
987 1109
1110 if (tb[IFLA_AF_SPEC]) {
1111 struct nlattr *af;
1112 int rem, err;
1113
1114 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
1115 const struct rtnl_af_ops *af_ops;
1116
1117 if (!(af_ops = rtnl_af_lookup(nla_type(af))))
1118 return -EAFNOSUPPORT;
1119
1120 if (!af_ops->set_link_af)
1121 return -EOPNOTSUPP;
1122
1123 if (af_ops->validate_link_af) {
1124 err = af_ops->validate_link_af(dev,
1125 tb[IFLA_AF_SPEC]);
1126 if (err < 0)
1127 return err;
1128 }
1129 }
1130 }
1131
988 return 0; 1132 return 0;
989} 1133}
990 1134
@@ -1225,6 +1369,24 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1225 goto errout; 1369 goto errout;
1226 modified = 1; 1370 modified = 1;
1227 } 1371 }
1372
1373 if (tb[IFLA_AF_SPEC]) {
1374 struct nlattr *af;
1375 int rem;
1376
1377 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
1378 const struct rtnl_af_ops *af_ops;
1379
1380 if (!(af_ops = rtnl_af_lookup(nla_type(af))))
1381 BUG();
1382
1383 err = af_ops->set_link_af(dev, af);
1384 if (err < 0)
1385 goto errout;
1386
1387 modified = 1;
1388 }
1389 }
1228 err = 0; 1390 err = 0;
1229 1391
1230errout: 1392errout:
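
The new rtnl_af_ops hooks above follow a simple pattern: each address family registers an ops structure on a global list under rtnl_lock, and rtnl_af_lookup() finds it by family whenever an IFLA_AF_SPEC attribute is filled, validated, or applied. A minimal user-space sketch of such a family-keyed registry, using its own af_ops type and no kernel list helpers:

#include <stdio.h>
#include <sys/socket.h>          /* AF_INET, AF_INET6 */

struct af_ops {
	int family;
	const char *(*name)(void);
	struct af_ops *next;
};

static struct af_ops *registry;  /* callers serialize, as rtnl_lock() does */

static void af_register(struct af_ops *ops)
{
	ops->next = registry;
	registry = ops;
}

static const struct af_ops *af_lookup(int family)
{
	const struct af_ops *ops;

	for (ops = registry; ops; ops = ops->next)
		if (ops->family == family)
			return ops;
	return NULL;             /* caller maps this to "unsupported" */
}

static const char *inet_name(void)  { return "inet";  }
static const char *inet6_name(void) { return "inet6"; }

int main(void)
{
	struct af_ops inet  = { .family = AF_INET,  .name = inet_name  };
	struct af_ops inet6 = { .family = AF_INET6, .name = inet6_name };
	const struct af_ops *ops;

	af_register(&inet);
	af_register(&inet6);

	ops = af_lookup(AF_INET6);
	printf("AF_INET6 -> %s\n", ops ? ops->name() : "unsupported");
	return 0;
}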
diff --git a/net/core/scm.c b/net/core/scm.c
index 413cab89017..bbe45445080 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -79,10 +79,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
79 return -ENOMEM; 79 return -ENOMEM;
80 *fplp = fpl; 80 *fplp = fpl;
81 fpl->count = 0; 81 fpl->count = 0;
82 fpl->max = SCM_MAX_FD;
82 } 83 }
83 fpp = &fpl->fp[fpl->count]; 84 fpp = &fpl->fp[fpl->count];
84 85
85 if (fpl->count + num > SCM_MAX_FD) 86 if (fpl->count + num > fpl->max)
86 return -EINVAL; 87 return -EINVAL;
87 88
88 /* 89 /*
@@ -331,11 +332,12 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
331 if (!fpl) 332 if (!fpl)
332 return NULL; 333 return NULL;
333 334
334 new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL); 335 new_fpl = kmemdup(fpl, offsetof(struct scm_fp_list, fp[fpl->count]),
336 GFP_KERNEL);
335 if (new_fpl) { 337 if (new_fpl) {
336 for (i=fpl->count-1; i>=0; i--) 338 for (i = 0; i < fpl->count; i++)
337 get_file(fpl->fp[i]); 339 get_file(fpl->fp[i]);
338 memcpy(new_fpl, fpl, sizeof(*fpl)); 340 new_fpl->max = new_fpl->count;
339 } 341 }
340 return new_fpl; 342 return new_fpl;
341} 343}
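
scm_fp_dup() above now copies only the used part of the descriptor list: kmemdup() with offsetof(struct scm_fp_list, fp[fpl->count]) duplicates the header plus count slots, and the copy's max is clamped to count. A user-space sketch of the same partial-duplication trick; fp_list and fp_dup() are made-up stand-ins, plain ints replace the struct file pointers, and offsetof() with a runtime index relies on the same GCC/Clang builtin the kernel call uses.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_FD 253

struct fp_list {
	short count;
	short max;
	int fd[MAX_FD];          /* stand-in for struct file pointers */
};

/* Duplicate only the header plus the 'count' used slots. */
static struct fp_list *fp_dup(const struct fp_list *src)
{
	size_t used = offsetof(struct fp_list, fd[src->count]);
	struct fp_list *copy = malloc(used);

	if (copy) {
		memcpy(copy, src, used);
		copy->max = copy->count;   /* the copy has no spare slots */
	}
	return copy;
}

int main(void)
{
	struct fp_list orig = { .count = 3, .max = MAX_FD, .fd = { 4, 7, 9 } };
	struct fp_list *dup = fp_dup(&orig);

	if (dup)
		printf("copied %d fds (%zu of %zu bytes)\n", dup->count,
		       offsetof(struct fp_list, fd[dup->count]), sizeof(orig));
	free(dup);
	return 0;
}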
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 104f8444754..8814a9a52f4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -778,6 +778,28 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
778 778
779 size = SKB_DATA_ALIGN(size); 779 size = SKB_DATA_ALIGN(size);
780 780
781 /* Check if we can avoid taking references on fragments if we own
782 * the last reference on skb->head. (see skb_release_data())
783 */
784 if (!skb->cloned)
785 fastpath = true;
786 else {
787 int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
788
789 fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
790 }
791
792 if (fastpath &&
793 size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
794 memmove(skb->head + size, skb_shinfo(skb),
795 offsetof(struct skb_shared_info,
796 frags[skb_shinfo(skb)->nr_frags]));
797 memmove(skb->head + nhead, skb->head,
798 skb_tail_pointer(skb) - skb->head);
799 off = nhead;
800 goto adjust_others;
801 }
802
781 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); 803 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
782 if (!data) 804 if (!data)
783 goto nodata; 805 goto nodata;
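
The fast path added in the hunk above reuses the existing head when the skb owns the last reference and the current allocation is already big enough (ksize() reports the real slab object size), so the kmalloc, the copy and the per-fragment refcounting are skipped. A rough user-space analogue, with a made-up grow() helper and glibc's malloc_usable_size() standing in for ksize():

#include <malloc.h>              /* malloc_usable_size(), glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Grow @buf (holding @len bytes) so it can take @need bytes. If the
 * current allocation is already large enough, keep it in place and skip
 * the copy; otherwise fall back to allocate-copy-free.
 */
static void *grow(void *buf, size_t len, size_t need)
{
	void *fresh;

	if (buf && malloc_usable_size(buf) >= need)
		return buf;                      /* fast path: reuse */

	fresh = malloc(need);
	if (!fresh)
		return NULL;
	memcpy(fresh, buf, len);                 /* slow path: copy over */
	free(buf);
	return fresh;
}

int main(void)
{
	char *buf = malloc(20);

	if (!buf)
		return 1;
	strcpy(buf, "headroom test");
	buf = grow(buf, strlen(buf) + 1, 24);    /* often reuses the block */
	if (!buf)
		return 1;
	printf("%s (usable %zu bytes)\n", buf, malloc_usable_size(buf));
	free(buf);
	return 0;
}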
@@ -791,17 +813,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
791 skb_shinfo(skb), 813 skb_shinfo(skb),
792 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 814 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
793 815
794 /* Check if we can avoid taking references on fragments if we own
795 * the last reference on skb->head. (see skb_release_data())
796 */
797 if (!skb->cloned)
798 fastpath = true;
799 else {
800 int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
801
802 fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
803 }
804
805 if (fastpath) { 816 if (fastpath) {
806 kfree(skb->head); 817 kfree(skb->head);
807 } else { 818 } else {
@@ -816,6 +827,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
816 off = (data + nhead) - skb->head; 827 off = (data + nhead) - skb->head;
817 828
818 skb->head = data; 829 skb->head = data;
830adjust_others:
819 skb->data += off; 831 skb->data += off;
820#ifdef NET_SKBUFF_DATA_USES_OFFSET 832#ifdef NET_SKBUFF_DATA_USES_OFFSET
821 skb->end = size; 833 skb->end = size;
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 0ae6c22da85..b124d28ff1c 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -26,12 +26,12 @@ static struct sock_filter ptp_filter[] = {
26 PTP_FILTER 26 PTP_FILTER
27}; 27};
28 28
29static unsigned int classify(struct sk_buff *skb) 29static unsigned int classify(const struct sk_buff *skb)
30{ 30{
31 if (likely(skb->dev && 31 if (likely(skb->dev &&
32 skb->dev->phydev && 32 skb->dev->phydev &&
33 skb->dev->phydev->drv)) 33 skb->dev->phydev->drv))
34 return sk_run_filter(skb, ptp_filter, ARRAY_SIZE(ptp_filter)); 34 return sk_run_filter(skb, ptp_filter);
35 else 35 else
36 return PTP_CLASS_NONE; 36 return PTP_CLASS_NONE;
37} 37}
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index 2991efcc8de..5c8362b037e 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -1,7 +1,7 @@
1obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o 1obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o
2 2
3dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o 3dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o \
4 4 qpolicy.o
5# 5#
6# CCID algorithms to be used by dccp.ko 6# CCID algorithms to be used by dccp.ko
7# 7#
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 92a6fcb40d7..25b7a8d1ad5 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -1,444 +1,375 @@
1/* 1/*
2 * net/dccp/ackvec.c 2 * net/dccp/ackvec.c
3 * 3 *
4 * An implementation of the DCCP protocol 4 * An implementation of Ack Vectors for the DCCP protocol
5 * Copyright (c) 2007 University of Aberdeen, Scotland, UK
5 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 9 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; version 2 of the License; 10 * Free Software Foundation; version 2 of the License;
10 */ 11 */
11
12#include "ackvec.h"
13#include "dccp.h" 12#include "dccp.h"
14
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <linux/kernel.h> 13#include <linux/kernel.h>
18#include <linux/skbuff.h>
19#include <linux/slab.h> 14#include <linux/slab.h>
20 15
21#include <net/sock.h>
22
23static struct kmem_cache *dccp_ackvec_slab; 16static struct kmem_cache *dccp_ackvec_slab;
24static struct kmem_cache *dccp_ackvec_record_slab; 17static struct kmem_cache *dccp_ackvec_record_slab;
25 18
26static struct dccp_ackvec_record *dccp_ackvec_record_new(void) 19struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
27{ 20{
28 struct dccp_ackvec_record *avr = 21 struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);
29 kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
30 22
31 if (avr != NULL) 23 if (av != NULL) {
32 INIT_LIST_HEAD(&avr->avr_node); 24 av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
33 25 INIT_LIST_HEAD(&av->av_records);
34 return avr; 26 }
27 return av;
35} 28}
36 29
37static void dccp_ackvec_record_delete(struct dccp_ackvec_record *avr) 30static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
38{ 31{
39 if (unlikely(avr == NULL)) 32 struct dccp_ackvec_record *cur, *next;
40 return; 33
41 /* Check if deleting a linked record */ 34 list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
42 WARN_ON(!list_empty(&avr->avr_node)); 35 kmem_cache_free(dccp_ackvec_record_slab, cur);
43 kmem_cache_free(dccp_ackvec_record_slab, avr); 36 INIT_LIST_HEAD(&av->av_records);
44} 37}
45 38
46static void dccp_ackvec_insert_avr(struct dccp_ackvec *av, 39void dccp_ackvec_free(struct dccp_ackvec *av)
47 struct dccp_ackvec_record *avr)
48{ 40{
49 /* 41 if (likely(av != NULL)) {
50 * AVRs are sorted by seqno. Since we are sending them in order, we 42 dccp_ackvec_purge_records(av);
51 * just add the AVR at the head of the list. 43 kmem_cache_free(dccp_ackvec_slab, av);
52 * -sorbo.
53 */
54 if (!list_empty(&av->av_records)) {
55 const struct dccp_ackvec_record *head =
56 list_entry(av->av_records.next,
57 struct dccp_ackvec_record,
58 avr_node);
59 BUG_ON(before48(avr->avr_ack_seqno, head->avr_ack_seqno));
60 } 44 }
61
62 list_add(&avr->avr_node, &av->av_records);
63} 45}
64 46
65int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) 47/**
48 * dccp_ackvec_update_records - Record information about sent Ack Vectors
49 * @av: Ack Vector records to update
50 * @seqno: Sequence number of the packet carrying the Ack Vector just sent
51 * @nonce_sum: The sum of all buffer nonces contained in the Ack Vector
52 */
53int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
66{ 54{
67 struct dccp_sock *dp = dccp_sk(sk);
68 struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
69 /* Figure out how many options do we need to represent the ackvec */
70 const u8 nr_opts = DIV_ROUND_UP(av->av_vec_len, DCCP_SINGLE_OPT_MAXLEN);
71 u16 len = av->av_vec_len + 2 * nr_opts, i;
72 u32 elapsed_time;
73 const unsigned char *tail, *from;
74 unsigned char *to;
75 struct dccp_ackvec_record *avr; 55 struct dccp_ackvec_record *avr;
76 suseconds_t delta;
77
78 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
79 return -1;
80
81 delta = ktime_us_delta(ktime_get_real(), av->av_time);
82 elapsed_time = delta / 10;
83 56
84 if (elapsed_time != 0 && 57 avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
85 dccp_insert_option_elapsed_time(skb, elapsed_time))
86 return -1;
87
88 avr = dccp_ackvec_record_new();
89 if (avr == NULL) 58 if (avr == NULL)
90 return -1; 59 return -ENOBUFS;
91
92 DCCP_SKB_CB(skb)->dccpd_opt_len += len;
93
94 to = skb_push(skb, len);
95 len = av->av_vec_len;
96 from = av->av_buf + av->av_buf_head;
97 tail = av->av_buf + DCCP_MAX_ACKVEC_LEN;
98
99 for (i = 0; i < nr_opts; ++i) {
100 int copylen = len;
101
102 if (len > DCCP_SINGLE_OPT_MAXLEN)
103 copylen = DCCP_SINGLE_OPT_MAXLEN;
104
105 *to++ = DCCPO_ACK_VECTOR_0;
106 *to++ = copylen + 2;
107
108 /* Check if buf_head wraps */
109 if (from + copylen > tail) {
110 const u16 tailsize = tail - from;
111
112 memcpy(to, from, tailsize);
113 to += tailsize;
114 len -= tailsize;
115 copylen -= tailsize;
116 from = av->av_buf;
117 }
118
119 memcpy(to, from, copylen);
120 from += copylen;
121 to += copylen;
122 len -= copylen;
123 }
124 60
61 avr->avr_ack_seqno = seqno;
62 avr->avr_ack_ptr = av->av_buf_head;
63 avr->avr_ack_ackno = av->av_buf_ackno;
64 avr->avr_ack_nonce = nonce_sum;
65 avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
125 /* 66 /*
126 * From RFC 4340, A.2: 67 * When the buffer overflows, we keep no more than one record. This is
127 * 68 * the simplest way of disambiguating sender-Acks dating from before the
128 * For each acknowledgement it sends, the HC-Receiver will add an 69 * overflow from sender-Acks which refer to after the overflow; a simple
129 * acknowledgement record. ack_seqno will equal the HC-Receiver 70 * solution is preferable here since we are handling an exception.
130 * sequence number it used for the ack packet; ack_ptr will equal
131 * buf_head; ack_ackno will equal buf_ackno; and ack_nonce will
132 * equal buf_nonce.
133 */ 71 */
134 avr->avr_ack_seqno = DCCP_SKB_CB(skb)->dccpd_seq; 72 if (av->av_overflow)
135 avr->avr_ack_ptr = av->av_buf_head; 73 dccp_ackvec_purge_records(av);
136 avr->avr_ack_ackno = av->av_buf_ackno; 74 /*
137 avr->avr_ack_nonce = av->av_buf_nonce; 75 * Since GSS is incremented for each packet, the list is automatically
138 avr->avr_sent_len = av->av_vec_len; 76 * arranged in descending order of @ack_seqno.
139 77 */
140 dccp_ackvec_insert_avr(av, avr); 78 list_add(&avr->avr_node, &av->av_records);
141 79
142 dccp_pr_debug("%s ACK Vector 0, len=%d, ack_seqno=%llu, " 80 dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
143 "ack_ackno=%llu\n",
144 dccp_role(sk), avr->avr_sent_len,
145 (unsigned long long)avr->avr_ack_seqno, 81 (unsigned long long)avr->avr_ack_seqno,
146 (unsigned long long)avr->avr_ack_ackno); 82 (unsigned long long)avr->avr_ack_ackno,
83 avr->avr_ack_runlen);
147 return 0; 84 return 0;
148} 85}
149 86
150struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority) 87static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list,
88 const u64 ackno)
151{ 89{
152 struct dccp_ackvec *av = kmem_cache_alloc(dccp_ackvec_slab, priority); 90 struct dccp_ackvec_record *avr;
153 91 /*
154 if (av != NULL) { 92 * Exploit that records are inserted in descending order of sequence
155 av->av_buf_head = DCCP_MAX_ACKVEC_LEN - 1; 93 * number, start with the oldest record first. If @ackno is `before'
156 av->av_buf_ackno = UINT48_MAX + 1; 94 * the earliest ack_ackno, the packet is too old to be considered.
157 av->av_buf_nonce = 0; 95 */
158 av->av_time = ktime_set(0, 0); 96 list_for_each_entry_reverse(avr, av_list, avr_node) {
159 av->av_vec_len = 0; 97 if (avr->avr_ack_seqno == ackno)
160 INIT_LIST_HEAD(&av->av_records); 98 return avr;
99 if (before48(ackno, avr->avr_ack_seqno))
100 break;
161 } 101 }
162 102 return NULL;
163 return av;
164} 103}
165 104
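
dccp_ackvec_update_records() adds each new record at the head of av_records, so the list stays sorted in descending ack_seqno order, and dccp_ackvec_lookup() above walks it from the oldest end and stops as soon as it has passed the wanted seqno. A standalone sketch of that early-exit lookup, with an append-only array standing in for the kernel list:

#include <stdio.h>

#define MAX_RECORDS 16

static unsigned long long records[MAX_RECORDS];  /* oldest first */
static int nr_records;

static void record_sent(unsigned long long seqno)
{
	if (nr_records < MAX_RECORDS)
		records[nr_records++] = seqno;   /* seqnos only grow */
}

static int lookup(unsigned long long ackno)
{
	int i;

	for (i = 0; i < nr_records; i++) {       /* start at the oldest */
		if (records[i] == ackno)
			return i;
		if (records[i] > ackno)          /* already past it: give up */
			break;
	}
	return -1;
}

int main(void)
{
	record_sent(100);
	record_sent(101);
	record_sent(104);

	printf("ackno 101 -> record %d\n", lookup(101));  /* 1  */
	printf("ackno 103 -> record %d\n", lookup(103));  /* -1 */
	return 0;
}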
166void dccp_ackvec_free(struct dccp_ackvec *av) 105/*
106 * Buffer index and length computation using modulo-buffersize arithmetic.
107 * Note that, as pointers move from right to left, head is `before' tail.
108 */
109static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
167{ 110{
168 if (unlikely(av == NULL)) 111 return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
169 return;
170
171 if (!list_empty(&av->av_records)) {
172 struct dccp_ackvec_record *avr, *next;
173
174 list_for_each_entry_safe(avr, next, &av->av_records, avr_node) {
175 list_del_init(&avr->avr_node);
176 dccp_ackvec_record_delete(avr);
177 }
178 }
179
180 kmem_cache_free(dccp_ackvec_slab, av);
181} 112}
182 113
183static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av, 114static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
184 const u32 index)
185{ 115{
186 return av->av_buf[index] & DCCP_ACKVEC_STATE_MASK; 116 return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
187} 117}
188 118
189static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av, 119u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
190 const u32 index)
191{ 120{
192 return av->av_buf[index] & DCCP_ACKVEC_LEN_MASK; 121 if (unlikely(av->av_overflow))
122 return DCCPAV_MAX_ACKVEC_LEN;
123 return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
193} 124}
194 125
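
The helpers above do all buffer positioning modulo DCCPAV_MAX_ACKVEC_LEN; since the head moves towards lower indices, subtraction is implemented as addition of the complement, and the buffer length is simply tail minus head in that arithmetic. A small standalone demo of the same index math, with the buffer shrunk to 8 cells so the wrap-around is visible:

#include <stdio.h>

#define AV_LEN 8   /* tiny buffer; the kernel uses DCCPAV_MAX_ACKVEC_LEN */

static unsigned idx_add(unsigned a, unsigned b)
{
	return (a + b) % AV_LEN;
}

static unsigned idx_sub(unsigned a, unsigned b)
{
	return idx_add(a, AV_LEN - b);
}

/* Live portion of the buffer, as dccp_ackvec_buflen() computes it. */
static unsigned buflen(unsigned head, unsigned tail)
{
	return idx_sub(tail, head);
}

int main(void)
{
	unsigned head = 2, tail = 6;

	printf("buflen(head=2, tail=6) = %u\n", buflen(head, tail)); /* 4 */
	head = idx_sub(head, 5);              /* head wraps: 2 - 5 -> 5 */
	printf("new head = %u, buflen = %u\n", head, buflen(head, tail)); /* 5, 1 */
	return 0;
}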
195/* 126/**
196 * If several packets are missing, the HC-Receiver may prefer to enter multiple 127 * dccp_ackvec_update_old - Update previous state as per RFC 4340, 11.4.1
197 * bytes with run length 0, rather than a single byte with a larger run length; 128 * @av: non-empty buffer to update
198 * this simplifies table updates if one of the missing packets arrives. 129 * @distance: negative or zero distance of @seqno from buf_ackno downward
130 * @seqno: the (old) sequence number whose record is to be updated
131 * @state: state in which packet carrying @seqno was received
199 */ 132 */
200static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av, 133static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance,
201 const unsigned int packets, 134 u64 seqno, enum dccp_ackvec_states state)
202 const unsigned char state)
203{ 135{
204 long gap; 136 u16 ptr = av->av_buf_head;
205 long new_head;
206 137
207 if (av->av_vec_len + packets > DCCP_MAX_ACKVEC_LEN) 138 BUG_ON(distance > 0);
208 return -ENOBUFS; 139 if (unlikely(dccp_ackvec_is_empty(av)))
140 return;
209 141
210 gap = packets - 1; 142 do {
211 new_head = av->av_buf_head - packets; 143 u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr);
212 144
213 if (new_head < 0) { 145 if (distance + runlen >= 0) {
214 if (gap > 0) { 146 /*
215 memset(av->av_buf, DCCP_ACKVEC_STATE_NOT_RECEIVED, 147 * Only update the state if packet has not been received
216 gap + new_head + 1); 148 * yet. This is OK as per the second table in RFC 4340,
217 gap = -new_head; 149 * 11.4.1; i.e. here we are using the following table:
150 * RECEIVED
151 * 0 1 3
152 * S +---+---+---+
153 * T 0 | 0 | 0 | 0 |
154 * O +---+---+---+
155 * R 1 | 1 | 1 | 1 |
156 * E +---+---+---+
157 * D 3 | 0 | 1 | 3 |
158 * +---+---+---+
159 * The "Not Received" state was set by reserve_seats().
160 */
161 if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED)
162 av->av_buf[ptr] = state;
163 else
164 dccp_pr_debug("Not changing %llu state to %u\n",
165 (unsigned long long)seqno, state);
166 break;
218 } 167 }
219 new_head += DCCP_MAX_ACKVEC_LEN;
220 }
221 168
222 av->av_buf_head = new_head; 169 distance += runlen + 1;
170 ptr = __ackvec_idx_add(ptr, 1);
223 171
224 if (gap > 0) 172 } while (ptr != av->av_buf_tail);
225 memset(av->av_buf + av->av_buf_head + 1, 173}
226 DCCP_ACKVEC_STATE_NOT_RECEIVED, gap);
227 174
228 av->av_buf[av->av_buf_head] = state; 175/* Mark @num entries after buf_head as "Not yet received". */
229 av->av_vec_len += packets; 176static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num)
230 return 0; 177{
178 u16 start = __ackvec_idx_add(av->av_buf_head, 1),
179 len = DCCPAV_MAX_ACKVEC_LEN - start;
180
181 /* check for buffer wrap-around */
182 if (num > len) {
183 memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len);
184 start = 0;
185 num -= len;
186 }
187 if (num)
188 memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num);
231} 189}
232 190
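
dccp_ackvec_update_old() walks the cells from the head, skipping whole run lengths until it reaches the cell covering the old sequence number, and per the table above it only fills cells that are still the reserved "not yet received" value. A standalone sketch of that walk; the cells use the 2-bit state / 6-bit run-length layout, and a reserved seat is the bare 0xC0 byte as written by reserve_seats() above.

#include <stdio.h>

#define RUNLEN_MASK  0x3F
#define ST_RECEIVED  0x00
#define ST_NOT_RECVD 0xC0

/*
 * Record a late arrival: @distance (<= 0) is how far the old seqno lies
 * below the newest entry. Walk the cells, and only overwrite a cell that
 * is still exactly the reserved "not yet received" value.
 */
static void update_old(unsigned char *vec, int len, long distance,
		       unsigned char state)
{
	int i;

	for (i = 0; i < len; i++) {
		int runlen = vec[i] & RUNLEN_MASK;

		if (distance + runlen >= 0) {
			if (vec[i] == ST_NOT_RECVD)
				vec[i] = state;   /* fill the hole */
			return;                   /* else leave it alone */
		}
		distance += runlen + 1;           /* skip this cell */
	}
}

int main(void)
{
	/* newest first: received, reserved hole, received with run length 2 */
	unsigned char vec[] = { 0x00, 0xC0, 0x02 };

	update_old(vec, 3, -1, ST_RECEIVED);      /* the hole sits 1 back */
	printf("cells: %02x %02x %02x\n", vec[0], vec[1], vec[2]);
	return 0;
}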
233/* 191/**
234 * Implements the RFC 4340, Appendix A 192 * dccp_ackvec_add_new - Record one or more new entries in Ack Vector buffer
193 * @av: container of buffer to update (can be empty or non-empty)
194 * @num_packets: number of packets to register (must be >= 1)
195 * @seqno: sequence number of the first packet in @num_packets
196 * @state: state in which packet carrying @seqno was received
235 */ 197 */
236int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, 198static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
237 const u64 ackno, const u8 state) 199 u64 seqno, enum dccp_ackvec_states state)
238{ 200{
239 /* 201 u32 num_cells = num_packets;
240 * Check at the right places if the buffer is full, if it is, tell the
241 * caller to start dropping packets till the HC-Sender acks our ACK
242 * vectors, when we will free up space in av_buf.
243 *
244 * We may well decide to do buffer compression, etc, but for now lets
245 * just drop.
246 *
247 * From Appendix A.1.1 (`New Packets'):
248 *
249 * Of course, the circular buffer may overflow, either when the
250 * HC-Sender is sending data at a very high rate, when the
251 * HC-Receiver's acknowledgements are not reaching the HC-Sender,
252 * or when the HC-Sender is forgetting to acknowledge those acks
253 * (so the HC-Receiver is unable to clean up old state). In this
254 * case, the HC-Receiver should either compress the buffer (by
255 * increasing run lengths when possible), transfer its state to
256 * a larger buffer, or, as a last resort, drop all received
257 * packets, without processing them whatsoever, until its buffer
258 * shrinks again.
259 */
260 202
261 /* See if this is the first ackno being inserted */ 203 if (num_packets > DCCPAV_BURST_THRESH) {
262 if (av->av_vec_len == 0) { 204 u32 lost_packets = num_packets - 1;
263 av->av_buf[av->av_buf_head] = state;
264 av->av_vec_len = 1;
265 } else if (after48(ackno, av->av_buf_ackno)) {
266 const u64 delta = dccp_delta_seqno(av->av_buf_ackno, ackno);
267 205
206 DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets);
268 /* 207 /*
269 * Look if the state of this packet is the same as the 208 * We received 1 packet and have a loss of size "num_packets-1"
270 * previous ackno and if so if we can bump the head len. 209 * which we squeeze into num_cells-1 rather than reserving an
210 * entire byte for each lost packet.
211 * The reason is that the vector grows in O(burst_length); when
212 * it grows too large there will be no room left for the payload.
213 * This is a trade-off: if a few packets out of the burst show
214 * up later, their state will not be changed; it is simply too
215 * costly to reshuffle/reallocate/copy the buffer each time.
216 * Should such problems persist, we will need to switch to a
217 * different underlying data structure.
271 */ 218 */
272 if (delta == 1 && 219 for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
273 dccp_ackvec_state(av, av->av_buf_head) == state && 220 u8 len = min(lost_packets, (u32)DCCPAV_MAX_RUNLEN);
274 dccp_ackvec_len(av, av->av_buf_head) < DCCP_ACKVEC_LEN_MASK)
275 av->av_buf[av->av_buf_head]++;
276 else if (dccp_ackvec_set_buf_head_state(av, delta, state))
277 return -ENOBUFS;
278 } else {
279 /*
280 * A.1.2. Old Packets
281 *
282 * When a packet with Sequence Number S <= buf_ackno
283 * arrives, the HC-Receiver will scan the table for
284 * the byte corresponding to S. (Indexing structures
285 * could reduce the complexity of this scan.)
286 */
287 u64 delta = dccp_delta_seqno(ackno, av->av_buf_ackno);
288 u32 index = av->av_buf_head;
289 221
290 while (1) { 222 av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
291 const u8 len = dccp_ackvec_len(av, index); 223 av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;
292 const u8 av_state = dccp_ackvec_state(av, index); 224
293 /* 225 lost_packets -= len;
294 * valid packets not yet in av_buf have a reserved
295 * entry, with a len equal to 0.
296 */
297 if (av_state == DCCP_ACKVEC_STATE_NOT_RECEIVED &&
298 len == 0 && delta == 0) { /* Found our
299 reserved seat! */
300 dccp_pr_debug("Found %llu reserved seat!\n",
301 (unsigned long long)ackno);
302 av->av_buf[index] = state;
303 goto out;
304 }
305 /* len == 0 means one packet */
306 if (delta < len + 1)
307 goto out_duplicate;
308
309 delta -= len + 1;
310 if (++index == DCCP_MAX_ACKVEC_LEN)
311 index = 0;
312 } 226 }
313 } 227 }
314 228
315 av->av_buf_ackno = ackno; 229 if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
316 av->av_time = ktime_get_real(); 230 DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
317out: 231 av->av_overflow = true;
318 return 0; 232 }
233
234 av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets);
235 if (av->av_overflow)
236 av->av_buf_tail = av->av_buf_head;
319 237
320out_duplicate: 238 av->av_buf[av->av_buf_head] = state;
321 /* Duplicate packet */ 239 av->av_buf_ackno = seqno;
322 dccp_pr_debug("Received a dup or already considered lost " 240
323 "packet: %llu\n", (unsigned long long)ackno); 241 if (num_packets > 1)
324 return -EILSEQ; 242 dccp_ackvec_reserve_seats(av, num_packets - 1);
325} 243}
326 244
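
The burst branch of dccp_ackvec_add_new() above compresses a long loss into a handful of cells: each cell is marked "not received" and carries a run length of up to DCCPAV_MAX_RUNLEN (63), so every loop pass consumes up to 63 lost packets instead of reserving one cell per packet. A standalone version of just that encoding loop, using its own encode_burst() helper:

#include <stdio.h>

#define RUNLEN_MAX   0x3F      /* 63, as DCCPAV_MAX_RUNLEN above */
#define ST_NOT_RECVD 0xC0

/* Encode @lost consecutive missing packets into as few cells as possible. */
static int encode_burst(unsigned char *cells, unsigned lost)
{
	int n = 0;

	while (lost) {
		unsigned len = lost < RUNLEN_MAX ? lost : RUNLEN_MAX;

		cells[n++] = ST_NOT_RECVD | len;   /* state | run length */
		lost -= len;
	}
	return n;
}

int main(void)
{
	unsigned char cells[8];
	int i, n = encode_burst(cells, 150);       /* 150 losses -> 3 cells */

	for (i = 0; i < n; i++)
		printf("cell %d: state 0x%02x, run length %u\n",
		       i, cells[i] & ~RUNLEN_MAX, cells[i] & RUNLEN_MAX);
	return 0;
}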
327static void dccp_ackvec_throw_record(struct dccp_ackvec *av, 245/**
328 struct dccp_ackvec_record *avr) 246 * dccp_ackvec_input - Register incoming packet in the buffer
247 */
248void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
329{ 249{
330 struct dccp_ackvec_record *next; 250 u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
251 enum dccp_ackvec_states state = DCCPAV_RECEIVED;
331 252
332 /* sort out vector length */ 253 if (dccp_ackvec_is_empty(av)) {
333 if (av->av_buf_head <= avr->avr_ack_ptr) 254 dccp_ackvec_add_new(av, 1, seqno, state);
334 av->av_vec_len = avr->avr_ack_ptr - av->av_buf_head; 255 av->av_tail_ackno = seqno;
335 else
336 av->av_vec_len = DCCP_MAX_ACKVEC_LEN - 1 -
337 av->av_buf_head + avr->avr_ack_ptr;
338 256
339 /* free records */ 257 } else {
340 list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) { 258 s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);
341 list_del_init(&avr->avr_node); 259 u8 *current_head = av->av_buf + av->av_buf_head;
342 dccp_ackvec_record_delete(avr);
343 }
344}
345 260
346void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, struct sock *sk, 261 if (num_packets == 1 &&
347 const u64 ackno) 262 dccp_ackvec_state(current_head) == state &&
348{ 263 dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) {
349 struct dccp_ackvec_record *avr;
350 264
351 /* 265 *current_head += 1;
352 * If we traverse backwards, it should be faster when we have large 266 av->av_buf_ackno = seqno;
353 * windows. We will be receiving ACKs for stuff we sent a while back 267
354 * -sorbo. 268 } else if (num_packets > 0) {
355 */ 269 dccp_ackvec_add_new(av, num_packets, seqno, state);
356 list_for_each_entry_reverse(avr, &av->av_records, avr_node) { 270 } else {
357 if (ackno == avr->avr_ack_seqno) { 271 dccp_ackvec_update_old(av, num_packets, seqno, state);
358 dccp_pr_debug("%s ACK packet 0, len=%d, ack_seqno=%llu, " 272 }
359 "ack_ackno=%llu, ACKED!\n",
360 dccp_role(sk), 1,
361 (unsigned long long)avr->avr_ack_seqno,
362 (unsigned long long)avr->avr_ack_ackno);
363 dccp_ackvec_throw_record(av, avr);
364 break;
365 } else if (avr->avr_ack_seqno > ackno)
366 break; /* old news */
367 } 273 }
368} 274}
369 275
370static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av, 276/**
371 struct sock *sk, u64 *ackno, 277 * dccp_ackvec_clear_state - Perform house-keeping / garbage-collection
372 const unsigned char len, 278 * This routine is called when the peer acknowledges the receipt of Ack Vectors
373 const unsigned char *vector) 279 * up to and including @ackno. While based on section A.3 of RFC 4340, here
280 * are additional precautions to prevent corrupted buffer state. In particular,
281 * we use tail_ackno to identify outdated records; it always marks the earliest
282 * packet of group (2) in 11.4.2.
283 */
284void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno)
374{ 285{
375 unsigned char i; 286 struct dccp_ackvec_record *avr, *next;
376 struct dccp_ackvec_record *avr; 287 u8 runlen_now, eff_runlen;
288 s64 delta;
377 289
378 /* Check if we actually sent an ACK vector */ 290 avr = dccp_ackvec_lookup(&av->av_records, ackno);
379 if (list_empty(&av->av_records)) 291 if (avr == NULL)
380 return; 292 return;
293 /*
294 * Deal with outdated acknowledgments: this arises when e.g. there are
295 * several old records and the acks from the peer come in slowly. In
296 * that case we may still have records that pre-date tail_ackno.
297 */
298 delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno);
299 if (delta < 0)
300 goto free_records;
301 /*
302 * Deal with overlapping Ack Vectors: don't subtract more than the
303 * number of packets between tail_ackno and ack_ackno.
304 */
305 eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen;
381 306
382 i = len; 307 runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr);
383 /* 308 /*
384 * XXX 309 * The run length of Ack Vector cells does not decrease over time. If
385 * I think it might be more efficient to work backwards. See comment on 310 * the run length is the same as at the time the Ack Vector was sent, we
386 * rcv_ackno. -sorbo. 311 * free the ack_ptr cell. That cell can however not be freed if the run
312 * length has increased: in this case we need to move the tail pointer
313 * backwards (towards higher indices), to its next-oldest neighbour.
387 */ 314 */
388 avr = list_entry(av->av_records.next, struct dccp_ackvec_record, avr_node); 315 if (runlen_now > eff_runlen) {
389 while (i--) {
390 const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
391 u64 ackno_end_rl;
392 316
393 dccp_set_seqno(&ackno_end_rl, *ackno - rl); 317 av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1;
318 av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1);
394 319
320 /* This move may not have cleared the overflow flag. */
321 if (av->av_overflow)
322 av->av_overflow = (av->av_buf_head == av->av_buf_tail);
323 } else {
324 av->av_buf_tail = avr->avr_ack_ptr;
395 /* 325 /*
396 * If our AVR sequence number is greater than the ack, go 326 * We have made sure that avr points to a valid cell within the
397 * forward in the AVR list until it is not so. 327 * buffer. This cell is either older than head, or equals head
328 * (empty buffer): in both cases we no longer have any overflow.
398 */ 329 */
399 list_for_each_entry_from(avr, &av->av_records, avr_node) { 330 av->av_overflow = 0;
400 if (!after48(avr->avr_ack_seqno, *ackno)) 331 }
401 goto found;
402 }
403 /* End of the av_records list, not found, exit */
404 break;
405found:
406 if (between48(avr->avr_ack_seqno, ackno_end_rl, *ackno)) {
407 const u8 state = *vector & DCCP_ACKVEC_STATE_MASK;
408 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) {
409 dccp_pr_debug("%s ACK vector 0, len=%d, "
410 "ack_seqno=%llu, ack_ackno=%llu, "
411 "ACKED!\n",
412 dccp_role(sk), len,
413 (unsigned long long)
414 avr->avr_ack_seqno,
415 (unsigned long long)
416 avr->avr_ack_ackno);
417 dccp_ackvec_throw_record(av, avr);
418 break;
419 }
420 /*
421 * If it wasn't received, continue scanning... we might
422 * find another one.
423 */
424 }
425 332
426 dccp_set_seqno(ackno, ackno_end_rl - 1); 333 /*
427 ++vector; 334 * The peer has acknowledged up to and including ack_ackno. Hence the
335 * first packet in group (2) of 11.4.2 is the successor of ack_ackno.
336 */
337 av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1);
338
339free_records:
340 list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
341 list_del(&avr->avr_node);
342 kmem_cache_free(dccp_ackvec_record_slab, avr);
428 } 343 }
429} 344}
430 345
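
The two branches of dccp_ackvec_clear_state() above decide how far the tail may advance once the peer has acknowledged a record: the effective run length is clamped to the distance from tail_ackno (overlapping Ack Vectors), and if the acknowledged cell has meanwhile grown a larger run length only its acknowledged part is dropped while the cell stays live. A standalone sketch of just that decision; new_tail() is a made-up helper and the indices wrap as in the earlier modulo helpers.

#include <stdio.h>

#define RUNLEN_MASK 0x3F

/*
 * @delta:       packets between the oldest live seqno and the acked ackno
 * @sent_runlen: run length the cell had when the Ack Vector was sent
 * Shrink the acked cell if it has grown since, keeping it live; otherwise
 * the whole cell can be freed together with everything older than it.
 */
static unsigned new_tail(unsigned char *buf, unsigned buflen,
			 unsigned ack_ptr, long delta,
			 unsigned char sent_runlen)
{
	unsigned char eff = delta < sent_runlen ? (unsigned char)delta
						: sent_runlen;
	unsigned char now = buf[ack_ptr] & RUNLEN_MASK;

	if (now > eff) {
		buf[ack_ptr] -= eff + 1;         /* drop the acked part    */
		return (ack_ptr + 1) % buflen;   /* cell itself stays live */
	}
	return ack_ptr;                          /* cell is freed as well  */
}

int main(void)
{
	unsigned char buf[8] = { [5] = 0x05 };   /* run length 5, received */
	unsigned tail = new_tail(buf, 8, 5, 2, 2);

	printf("tail -> %u, acked cell now 0x%02x\n", tail, buf[5]);
	return 0;
}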
431int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, 346/*
432 u64 *ackno, const u8 opt, const u8 *value, const u8 len) 347 * Routines to keep track of Ack Vectors received in an skb
348 */
349int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce)
433{ 350{
434 if (len > DCCP_SINGLE_OPT_MAXLEN) 351 struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC);
435 return -1; 352
353 if (new == NULL)
354 return -ENOBUFS;
355 new->vec = vec;
356 new->len = len;
357 new->nonce = nonce;
436 358
437 /* dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq, value, len); */ 359 list_add_tail(&new->node, head);
438 dccp_ackvec_check_rcv_ackvector(dccp_sk(sk)->dccps_hc_rx_ackvec, sk,
439 ackno, len, value);
440 return 0; 360 return 0;
441} 361}
362EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add);
363
364void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks)
365{
366 struct dccp_ackvec_parsed *cur, *next;
367
368 list_for_each_entry_safe(cur, next, parsed_chunks, node)
369 kfree(cur);
370 INIT_LIST_HEAD(parsed_chunks);
371}
372EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);
442 373
443int __init dccp_ackvec_init(void) 374int __init dccp_ackvec_init(void)
444{ 375{
@@ -448,10 +379,9 @@ int __init dccp_ackvec_init(void)
448 if (dccp_ackvec_slab == NULL) 379 if (dccp_ackvec_slab == NULL)
449 goto out_err; 380 goto out_err;
450 381
451 dccp_ackvec_record_slab = 382 dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
452 kmem_cache_create("dccp_ackvec_record", 383 sizeof(struct dccp_ackvec_record),
453 sizeof(struct dccp_ackvec_record), 384 0, SLAB_HWCACHE_ALIGN, NULL);
454 0, SLAB_HWCACHE_ALIGN, NULL);
455 if (dccp_ackvec_record_slab == NULL) 385 if (dccp_ackvec_record_slab == NULL)
456 goto out_destroy_slab; 386 goto out_destroy_slab;
457 387
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index 7ea557b7c6b..e2ab0627a5f 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -3,9 +3,9 @@
3/* 3/*
4 * net/dccp/ackvec.h 4 * net/dccp/ackvec.h
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of Ack Vectors for the DCCP protocol
7 * Copyright (c) 2007 University of Aberdeen, Scotland, UK
7 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@mandriva.com> 8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@mandriva.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as 10 * under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
@@ -13,99 +13,124 @@
13 13
14#include <linux/dccp.h> 14#include <linux/dccp.h>
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <linux/ktime.h>
17#include <linux/list.h> 16#include <linux/list.h>
18#include <linux/types.h> 17#include <linux/types.h>
19 18
20/* We can spread an ack vector across multiple options */ 19/*
21#define DCCP_MAX_ACKVEC_LEN (DCCP_SINGLE_OPT_MAXLEN * 2) 20 * Ack Vector buffer space is static, in multiples of %DCCP_SINGLE_OPT_MAXLEN,
21 * the maximum size of a single Ack Vector. Setting %DCCPAV_NUM_ACKVECS to 1
22 * will be sufficient for most cases of low Ack Ratios; using a value of 2 gives
23 * more headroom if Ack Ratio is higher or when the sender acknowledges slowly.
24 * The maximum value is bounded by the u16 types for indices and functions.
25 */
26#define DCCPAV_NUM_ACKVECS 2
27#define DCCPAV_MAX_ACKVEC_LEN (DCCP_SINGLE_OPT_MAXLEN * DCCPAV_NUM_ACKVECS)
22 28
23/* Estimated minimum average Ack Vector length - used for updating MPS */ 29/* Estimated minimum average Ack Vector length - used for updating MPS */
24#define DCCPAV_MIN_OPTLEN 16 30#define DCCPAV_MIN_OPTLEN 16
25 31
26#define DCCP_ACKVEC_STATE_RECEIVED 0 32/* Threshold for coping with large bursts of losses */
27#define DCCP_ACKVEC_STATE_ECN_MARKED (1 << 6) 33#define DCCPAV_BURST_THRESH (DCCPAV_MAX_ACKVEC_LEN / 8)
28#define DCCP_ACKVEC_STATE_NOT_RECEIVED (3 << 6)
29 34
30#define DCCP_ACKVEC_STATE_MASK 0xC0 /* 11000000 */ 35enum dccp_ackvec_states {
31#define DCCP_ACKVEC_LEN_MASK 0x3F /* 00111111 */ 36 DCCPAV_RECEIVED = 0x00,
37 DCCPAV_ECN_MARKED = 0x40,
38 DCCPAV_RESERVED = 0x80,
39 DCCPAV_NOT_RECEIVED = 0xC0
40};
41#define DCCPAV_MAX_RUNLEN 0x3F
32 42
33/** struct dccp_ackvec - ack vector 43static inline u8 dccp_ackvec_runlen(const u8 *cell)
34 * 44{
35 * This data structure is the one defined in RFC 4340, Appendix A. 45 return *cell & DCCPAV_MAX_RUNLEN;
36 * 46}
37 * @av_buf_head - circular buffer head 47
38 * @av_buf_tail - circular buffer tail 48static inline u8 dccp_ackvec_state(const u8 *cell)
39 * @av_buf_ackno - ack # of the most recent packet acknowledgeable in the 49{
40 * buffer (i.e. %av_buf_head) 50 return *cell & ~DCCPAV_MAX_RUNLEN;
41 * @av_buf_nonce - the one-bit sum of the ECN Nonces on all packets acked 51}
42 * by the buffer with State 0 52
43 * 53/** struct dccp_ackvec - Ack Vector main data structure
44 * Additionally, the HC-Receiver must keep some information about the
45 * Ack Vectors it has recently sent. For each packet sent carrying an
46 * Ack Vector, it remembers four variables:
47 * 54 *
48 * @av_records - list of dccp_ackvec_record 55 * This implements a fixed-size circular buffer within an array and is largely
49 * @av_ack_nonce - the one-bit sum of the ECN Nonces for all State 0. 56 * based on Appendix A of RFC 4340.
50 * 57 *
51 * @av_time - the time in usecs 58 * @av_buf: circular buffer storage area
52 * @av_buf - circular buffer of acknowledgeable packets 59 * @av_buf_head: head index; begin of live portion in @av_buf
60 * @av_buf_tail: tail index; first index _after_ the live portion in @av_buf
61 * @av_buf_ackno: highest seqno of acknowledgeable packet recorded in @av_buf
62 * @av_tail_ackno: lowest seqno of acknowledgeable packet recorded in @av_buf
63 * @av_buf_nonce: ECN nonce sums, each covering subsequent segments of up to
64 * %DCCP_SINGLE_OPT_MAXLEN cells in the live portion of @av_buf
65 * @av_overflow: if 1 then buf_head == buf_tail indicates buffer wraparound
66 * @av_records: list of %dccp_ackvec_record (Ack Vectors sent previously)
53 */ 67 */
54struct dccp_ackvec { 68struct dccp_ackvec {
55 u64 av_buf_ackno; 69 u8 av_buf[DCCPAV_MAX_ACKVEC_LEN];
56 struct list_head av_records;
57 ktime_t av_time;
58 u16 av_buf_head; 70 u16 av_buf_head;
59 u16 av_vec_len; 71 u16 av_buf_tail;
60 u8 av_buf_nonce; 72 u64 av_buf_ackno:48;
61 u8 av_ack_nonce; 73 u64 av_tail_ackno:48;
62 u8 av_buf[DCCP_MAX_ACKVEC_LEN]; 74 bool av_buf_nonce[DCCPAV_NUM_ACKVECS];
75 u8 av_overflow:1;
76 struct list_head av_records;
63}; 77};
64 78
65/** struct dccp_ackvec_record - ack vector record 79/** struct dccp_ackvec_record - Records information about sent Ack Vectors
66 * 80 *
67 * ACK vector record as defined in Appendix A of spec. 81 * These list entries define the additional information which the HC-Receiver
82 * keeps about recently-sent Ack Vectors; again refer to RFC 4340, Appendix A.
68 * 83 *
69 * The list is sorted by avr_ack_seqno 84 * @avr_node: the list node in @av_records
85 * @avr_ack_seqno: sequence number of the packet the Ack Vector was sent on
86 * @avr_ack_ackno: the Ack number that this record/Ack Vector refers to
87 * @avr_ack_ptr: pointer into @av_buf where this record starts
88 * @avr_ack_runlen: run length of @avr_ack_ptr at the time of sending
89 * @avr_ack_nonce: the sum of @av_buf_nonce's at the time this record was sent
70 * 90 *
71 * @avr_node - node in av_records 91 * The list as a whole is sorted in descending order by @avr_ack_seqno.
72 * @avr_ack_seqno - sequence number of the packet this record was sent on
73 * @avr_ack_ackno - sequence number being acknowledged
74 * @avr_ack_ptr - pointer into av_buf where this record starts
75 * @avr_ack_nonce - av_ack_nonce at the time this record was sent
76 * @avr_sent_len - length of the record in av_buf
77 */ 92 */
78struct dccp_ackvec_record { 93struct dccp_ackvec_record {
79 struct list_head avr_node; 94 struct list_head avr_node;
80 u64 avr_ack_seqno; 95 u64 avr_ack_seqno:48;
81 u64 avr_ack_ackno; 96 u64 avr_ack_ackno:48;
82 u16 avr_ack_ptr; 97 u16 avr_ack_ptr;
83 u16 avr_sent_len; 98 u8 avr_ack_runlen;
84 u8 avr_ack_nonce; 99 u8 avr_ack_nonce:1;
85}; 100};
86 101
87struct sock;
88struct sk_buff;
89
90extern int dccp_ackvec_init(void); 102extern int dccp_ackvec_init(void);
91extern void dccp_ackvec_exit(void); 103extern void dccp_ackvec_exit(void);
92 104
93extern struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority); 105extern struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
94extern void dccp_ackvec_free(struct dccp_ackvec *av); 106extern void dccp_ackvec_free(struct dccp_ackvec *av);
95 107
96extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, 108extern void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
97 const u64 ackno, const u8 state); 109extern int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
98 110extern void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
99extern void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, 111extern u16 dccp_ackvec_buflen(const struct dccp_ackvec *av);
100 struct sock *sk, const u64 ackno);
101extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
102 u64 *ackno, const u8 opt,
103 const u8 *value, const u8 len);
104 112
105extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb); 113static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
106
107static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
108{ 114{
109 return av->av_vec_len; 115 return av->av_overflow == 0 && av->av_buf_head == av->av_buf_tail;
110} 116}
117
118/**
119 * struct dccp_ackvec_parsed - Record offsets of Ack Vectors in skb
120 * @vec: start of vector (offset into skb)
121 * @len: length of @vec
122 * @nonce: whether @vec had an ECN nonce of 0 or 1
123 * @node: FIFO - arranged in descending order of ack_ackno
124 * This structure is used by CCIDs to access Ack Vectors in a received skb.
125 */
126struct dccp_ackvec_parsed {
127 u8 *vec,
128 len,
129 nonce:1;
130 struct list_head node;
131};
132
133extern int dccp_ackvec_parsed_add(struct list_head *head,
134 u8 *vec, u8 len, u8 nonce);
135extern void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
111#endif /* _ACKVEC_H */ 136#endif /* _ACKVEC_H */
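
With the cell layout defined above (state in the top two bits, run length in the low six, each cell covering run length + 1 consecutive sequence numbers counting down), decoding a received Ack Vector option is a single downward walk, as the ccid2 receive path below performs on every parsed chunk. A standalone sketch of that walk; decode() and state_name() are local helpers, not kernel functions.

#include <stdio.h>

#define RUNLEN_MASK 0x3F

static const char *state_name(unsigned char cell)
{
	switch (cell & ~RUNLEN_MASK) {
	case 0x00: return "received";
	case 0x40: return "ecn-marked";
	case 0xC0: return "not received";
	default:   return "reserved";
	}
}

/*
 * Walk one Ack Vector option: each cell covers run_length + 1 consecutive
 * sequence numbers, counting down from @ackno.
 */
static void decode(const unsigned char *vec, int len, unsigned long long ackno)
{
	int i;

	for (i = 0; i < len; i++) {
		unsigned runlen = vec[i] & RUNLEN_MASK;

		printf("%llu..%llu: %s\n", ackno - runlen, ackno,
		       state_name(vec[i]));
		ackno -= runlen + 1;
	}
}

int main(void)
{
	/* received (rl=1), hole of three, received (rl=0) */
	const unsigned char vec[] = { 0x01, 0xC2, 0x00 };

	decode(vec, 3, 200);
	return 0;
}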
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 6576eae9e77..e96d5e81003 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -246,68 +246,6 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
246#endif 246#endif
247} 247}
248 248
249/* XXX Lame code duplication!
250 * returns -1 if none was found.
251 * else returns the next offset to use in the function call.
252 */
253static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
254 unsigned char **vec, unsigned char *veclen)
255{
256 const struct dccp_hdr *dh = dccp_hdr(skb);
257 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
258 unsigned char *opt_ptr;
259 const unsigned char *opt_end = (unsigned char *)dh +
260 (dh->dccph_doff * 4);
261 unsigned char opt, len;
262 unsigned char *value;
263
264 BUG_ON(offset < 0);
265 options += offset;
266 opt_ptr = options;
267 if (opt_ptr >= opt_end)
268 return -1;
269
270 while (opt_ptr != opt_end) {
271 opt = *opt_ptr++;
272 len = 0;
273 value = NULL;
274
275 /* Check if this isn't a single byte option */
276 if (opt > DCCPO_MAX_RESERVED) {
277 if (opt_ptr == opt_end)
278 goto out_invalid_option;
279
280 len = *opt_ptr++;
281 if (len < 3)
282 goto out_invalid_option;
283 /*
284 * Remove the type and len fields, leaving
285 * just the value size
286 */
287 len -= 2;
288 value = opt_ptr;
289 opt_ptr += len;
290
291 if (opt_ptr > opt_end)
292 goto out_invalid_option;
293 }
294
295 switch (opt) {
296 case DCCPO_ACK_VECTOR_0:
297 case DCCPO_ACK_VECTOR_1:
298 *vec = value;
299 *veclen = len;
300 return offset + (opt_ptr - options);
301 }
302 }
303
304 return -1;
305
306out_invalid_option:
307 DCCP_BUG("Invalid option - this should not happen (previous parsing)!");
308 return -1;
309}
310
311/** 249/**
312 * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm 250 * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
313 * This code is almost identical with TCP's tcp_rtt_estimator(), since 251 * This code is almost identical with TCP's tcp_rtt_estimator(), since
@@ -432,16 +370,28 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
432 ccid2_change_l_ack_ratio(sk, hc->tx_cwnd); 370 ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
433} 371}
434 372
373static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
374 u8 option, u8 *optval, u8 optlen)
375{
376 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
377
378 switch (option) {
379 case DCCPO_ACK_VECTOR_0:
380 case DCCPO_ACK_VECTOR_1:
381 return dccp_ackvec_parsed_add(&hc->tx_av_chunks, optval, optlen,
382 option - DCCPO_ACK_VECTOR_0);
383 }
384 return 0;
385}
386
435static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) 387static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
436{ 388{
437 struct dccp_sock *dp = dccp_sk(sk); 389 struct dccp_sock *dp = dccp_sk(sk);
438 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 390 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
439 const bool sender_was_blocked = ccid2_cwnd_network_limited(hc); 391 const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
392 struct dccp_ackvec_parsed *avp;
440 u64 ackno, seqno; 393 u64 ackno, seqno;
441 struct ccid2_seq *seqp; 394 struct ccid2_seq *seqp;
442 unsigned char *vector;
443 unsigned char veclen;
444 int offset = 0;
445 int done = 0; 395 int done = 0;
446 unsigned int maxincr = 0; 396 unsigned int maxincr = 0;
447 397
@@ -475,17 +425,12 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
475 } 425 }
476 426
477 /* check forward path congestion */ 427 /* check forward path congestion */
478 /* still didn't send out new data packets */ 428 if (dccp_packet_without_ack(skb))
479 if (hc->tx_seqh == hc->tx_seqt)
480 return; 429 return;
481 430
482 switch (DCCP_SKB_CB(skb)->dccpd_type) { 431 /* still didn't send out new data packets */
483 case DCCP_PKT_ACK: 432 if (hc->tx_seqh == hc->tx_seqt)
484 case DCCP_PKT_DATAACK: 433 goto done;
485 break;
486 default:
487 return;
488 }
489 434
490 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; 435 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
491 if (after48(ackno, hc->tx_high_ack)) 436 if (after48(ackno, hc->tx_high_ack))
@@ -509,16 +454,16 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
509 maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2); 454 maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);
510 455
511 /* go through all ack vectors */ 456 /* go through all ack vectors */
512 while ((offset = ccid2_ackvector(sk, skb, offset, 457 list_for_each_entry(avp, &hc->tx_av_chunks, node) {
513 &vector, &veclen)) != -1) {
514 /* go through this ack vector */ 458 /* go through this ack vector */
515 while (veclen--) { 459 for (; avp->len--; avp->vec++) {
516 const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; 460 u64 ackno_end_rl = SUB48(ackno,
517 u64 ackno_end_rl = SUB48(ackno, rl); 461 dccp_ackvec_runlen(avp->vec));
518 462
519 ccid2_pr_debug("ackvec start:%llu end:%llu\n", 463 ccid2_pr_debug("ackvec %llu |%u,%u|\n",
520 (unsigned long long)ackno, 464 (unsigned long long)ackno,
521 (unsigned long long)ackno_end_rl); 465 dccp_ackvec_state(avp->vec) >> 6,
466 dccp_ackvec_runlen(avp->vec));
522 /* if the seqno we are analyzing is larger than the 467 /* if the seqno we are analyzing is larger than the
523 * current ackno, then move towards the tail of our 468 * current ackno, then move towards the tail of our
524 * seqnos. 469 * seqnos.
@@ -537,17 +482,15 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
537 * run length 482 * run length
538 */ 483 */
539 while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) { 484 while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
540 const u8 state = *vector & 485 const u8 state = dccp_ackvec_state(avp->vec);
541 DCCP_ACKVEC_STATE_MASK;
542 486
543 /* new packet received or marked */ 487 /* new packet received or marked */
544 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED && 488 if (state != DCCPAV_NOT_RECEIVED &&
545 !seqp->ccid2s_acked) { 489 !seqp->ccid2s_acked) {
546 if (state == 490 if (state == DCCPAV_ECN_MARKED)
547 DCCP_ACKVEC_STATE_ECN_MARKED) {
548 ccid2_congestion_event(sk, 491 ccid2_congestion_event(sk,
549 seqp); 492 seqp);
550 } else 493 else
551 ccid2_new_ack(sk, seqp, 494 ccid2_new_ack(sk, seqp,
552 &maxincr); 495 &maxincr);
553 496
@@ -566,7 +509,6 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
566 break; 509 break;
567 510
568 ackno = SUB48(ackno_end_rl, 1); 511 ackno = SUB48(ackno_end_rl, 1);
569 vector++;
570 } 512 }
571 if (done) 513 if (done)
572 break; 514 break;
@@ -634,10 +576,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
634 sk_stop_timer(sk, &hc->tx_rtotimer); 576 sk_stop_timer(sk, &hc->tx_rtotimer);
635 else 577 else
636 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); 578 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
637 579done:
638 /* check if incoming Acks allow pending packets to be sent */ 580 /* check if incoming Acks allow pending packets to be sent */
639 if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) 581 if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
640 tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); 582 tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
583 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
641} 584}
642 585
643static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) 586static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -666,6 +609,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
666 hc->tx_last_cong = ccid2_time_stamp; 609 hc->tx_last_cong = ccid2_time_stamp;
667 setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, 610 setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
668 (unsigned long)sk); 611 (unsigned long)sk);
612 INIT_LIST_HEAD(&hc->tx_av_chunks);
669 return 0; 613 return 0;
670} 614}
671 615
@@ -699,16 +643,17 @@ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
699} 643}
700 644
701struct ccid_operations ccid2_ops = { 645struct ccid_operations ccid2_ops = {
702 .ccid_id = DCCPC_CCID2, 646 .ccid_id = DCCPC_CCID2,
703 .ccid_name = "TCP-like", 647 .ccid_name = "TCP-like",
704 .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock), 648 .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock),
705 .ccid_hc_tx_init = ccid2_hc_tx_init, 649 .ccid_hc_tx_init = ccid2_hc_tx_init,
706 .ccid_hc_tx_exit = ccid2_hc_tx_exit, 650 .ccid_hc_tx_exit = ccid2_hc_tx_exit,
707 .ccid_hc_tx_send_packet = ccid2_hc_tx_send_packet, 651 .ccid_hc_tx_send_packet = ccid2_hc_tx_send_packet,
708 .ccid_hc_tx_packet_sent = ccid2_hc_tx_packet_sent, 652 .ccid_hc_tx_packet_sent = ccid2_hc_tx_packet_sent,
709 .ccid_hc_tx_packet_recv = ccid2_hc_tx_packet_recv, 653 .ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
710 .ccid_hc_rx_obj_size = sizeof(struct ccid2_hc_rx_sock), 654 .ccid_hc_tx_packet_recv = ccid2_hc_tx_packet_recv,
711 .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv, 655 .ccid_hc_rx_obj_size = sizeof(struct ccid2_hc_rx_sock),
656 .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv,
712}; 657};
713 658
714#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 659#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
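
With ccid2_ackvector() removed, the TX side above reads pre-parsed Ack Vector cells through dccp_ackvec_state() and dccp_ackvec_runlen(). A standalone decoder sketch under the assumption (consistent with the debug printout in the hunk) that bits 7-6 of a cell hold the state, bits 5-0 the run length, and each cell covers run length + 1 sequence numbers counted down from the current ack number:

/* Illustrative userspace decoder, not the kernel implementation. */
#include <stdint.h>
#include <stdio.h>

#define AV_STATE(cell)  ((cell) >> 6)    /* 0 = received, 1 = ECN marked, 3 = not received */
#define AV_RUNLEN(cell) ((cell) & 0x3f)

static void decode_ackvec(uint64_t ackno, const uint8_t *vec, uint8_t len)
{
	while (len--) {
		uint8_t  rl  = AV_RUNLEN(*vec);
		uint64_t end = ackno - rl;    /* toy arithmetic; the kernel uses SUB48() */

		printf("state=%u covers %llu..%llu\n", AV_STATE(*vec),
		       (unsigned long long)end, (unsigned long long)ackno);
		ackno = end - 1;
		vec++;
	}
}

int main(void)
{
	const uint8_t vec[] = { 0x02, 0xc0, 0x01 };  /* 3 received, 1 missing, 2 received */

	decode_ackvec(100, vec, sizeof(vec));
	return 0;
}
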
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 25cb6b216ed..e9985dafc2c 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -55,6 +55,7 @@ struct ccid2_seq {
55 * @tx_rtt_seq: to decay RTTVAR at most once per flight 55 * @tx_rtt_seq: to decay RTTVAR at most once per flight
56 * @tx_rpseq: last consecutive seqno 56 * @tx_rpseq: last consecutive seqno
57 * @tx_rpdupack: dupacks since rpseq 57 * @tx_rpdupack: dupacks since rpseq
58 * @tx_av_chunks: list of Ack Vectors received on current skb
58 */ 59 */
59struct ccid2_hc_tx_sock { 60struct ccid2_hc_tx_sock {
60 u32 tx_cwnd; 61 u32 tx_cwnd;
@@ -79,6 +80,7 @@ struct ccid2_hc_tx_sock {
79 int tx_rpdupack; 80 int tx_rpdupack;
80 u32 tx_last_cong; 81 u32 tx_last_cong;
81 u64 tx_high_ack; 82 u64 tx_high_ack;
83 struct list_head tx_av_chunks;
82}; 84};
83 85
84static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc) 86static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index a8ed459508b..48ad5d9da7c 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -243,6 +243,19 @@ extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
243extern void dccp_send_sync(struct sock *sk, const u64 seq, 243extern void dccp_send_sync(struct sock *sk, const u64 seq,
244 const enum dccp_pkt_type pkt_type); 244 const enum dccp_pkt_type pkt_type);
245 245
246/*
247 * TX Packet Dequeueing Interface
248 */
249extern void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
250extern bool dccp_qpolicy_full(struct sock *sk);
251extern void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
252extern struct sk_buff *dccp_qpolicy_top(struct sock *sk);
253extern struct sk_buff *dccp_qpolicy_pop(struct sock *sk);
254extern bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
255
256/*
257 * TX Packet Output and TX Timers
258 */
246extern void dccp_write_xmit(struct sock *sk); 259extern void dccp_write_xmit(struct sock *sk);
247extern void dccp_write_space(struct sock *sk); 260extern void dccp_write_space(struct sock *sk);
248extern void dccp_flush_write_queue(struct sock *sk, long *time_budget); 261extern void dccp_flush_write_queue(struct sock *sk, long *time_budget);
@@ -457,12 +470,15 @@ static inline void dccp_update_gss(struct sock *sk, u64 seq)
457 dp->dccps_awh = dp->dccps_gss; 470 dp->dccps_awh = dp->dccps_gss;
458} 471}
459 472
473static inline int dccp_ackvec_pending(const struct sock *sk)
474{
475 return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL &&
476 !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec);
477}
478
460static inline int dccp_ack_pending(const struct sock *sk) 479static inline int dccp_ack_pending(const struct sock *sk)
461{ 480{
462 const struct dccp_sock *dp = dccp_sk(sk); 481 return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
463 return (dp->dccps_hc_rx_ackvec != NULL &&
464 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) ||
465 inet_csk_ack_scheduled(sk);
466} 482}
467 483
468extern int dccp_feat_finalise_settings(struct dccp_sock *dp); 484extern int dccp_feat_finalise_settings(struct dccp_sock *dp);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index e424a09e83f..15af247ea00 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -160,13 +160,15 @@ static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
160 dccp_time_wait(sk, DCCP_TIME_WAIT, 0); 160 dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
161} 161}
162 162
163static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb) 163static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
164{ 164{
165 struct dccp_sock *dp = dccp_sk(sk); 165 struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
166 166
167 if (dp->dccps_hc_rx_ackvec != NULL) 167 if (av == NULL)
168 dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk, 168 return;
169 DCCP_SKB_CB(skb)->dccpd_ack_seq); 169 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
170 dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
171 dccp_ackvec_input(av, skb);
170} 172}
171 173
172static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb) 174static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
@@ -366,22 +368,13 @@ discard:
366int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, 368int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
367 const struct dccp_hdr *dh, const unsigned len) 369 const struct dccp_hdr *dh, const unsigned len)
368{ 370{
369 struct dccp_sock *dp = dccp_sk(sk);
370
371 if (dccp_check_seqno(sk, skb)) 371 if (dccp_check_seqno(sk, skb))
372 goto discard; 372 goto discard;
373 373
374 if (dccp_parse_options(sk, NULL, skb)) 374 if (dccp_parse_options(sk, NULL, skb))
375 return 1; 375 return 1;
376 376
377 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 377 dccp_handle_ackvec_processing(sk, skb);
378 dccp_event_ack_recv(sk, skb);
379
380 if (dp->dccps_hc_rx_ackvec != NULL &&
381 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
382 DCCP_SKB_CB(skb)->dccpd_seq,
383 DCCP_ACKVEC_STATE_RECEIVED))
384 goto discard;
385 dccp_deliver_input_to_ccids(sk, skb); 378 dccp_deliver_input_to_ccids(sk, skb);
386 379
387 return __dccp_rcv_established(sk, skb, dh, len); 380 return __dccp_rcv_established(sk, skb, dh, len);
@@ -633,15 +626,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
633 if (dccp_parse_options(sk, NULL, skb)) 626 if (dccp_parse_options(sk, NULL, skb))
634 return 1; 627 return 1;
635 628
636 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 629 dccp_handle_ackvec_processing(sk, skb);
637 dccp_event_ack_recv(sk, skb);
638
639 if (dp->dccps_hc_rx_ackvec != NULL &&
640 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
641 DCCP_SKB_CB(skb)->dccpd_seq,
642 DCCP_ACKVEC_STATE_RECEIVED))
643 goto discard;
644
645 dccp_deliver_input_to_ccids(sk, skb); 630 dccp_deliver_input_to_ccids(sk, skb);
646 } 631 }
647 632
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 3f69ea11482..45a434f9416 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -462,15 +462,12 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
462{ 462{
463 struct rtable *rt; 463 struct rtable *rt;
464 struct flowi fl = { .oif = skb_rtable(skb)->rt_iif, 464 struct flowi fl = { .oif = skb_rtable(skb)->rt_iif,
465 .nl_u = { .ip4_u = 465 .fl4_dst = ip_hdr(skb)->saddr,
466 { .daddr = ip_hdr(skb)->saddr, 466 .fl4_src = ip_hdr(skb)->daddr,
467 .saddr = ip_hdr(skb)->daddr, 467 .fl4_tos = RT_CONN_FLAGS(sk),
468 .tos = RT_CONN_FLAGS(sk) } },
469 .proto = sk->sk_protocol, 468 .proto = sk->sk_protocol,
470 .uli_u = { .ports = 469 .fl_ip_sport = dccp_hdr(skb)->dccph_dport,
471 { .sport = dccp_hdr(skb)->dccph_dport, 470 .fl_ip_dport = dccp_hdr(skb)->dccph_sport
472 .dport = dccp_hdr(skb)->dccph_sport }
473 }
474 }; 471 };
475 472
476 security_skb_classify_flow(skb, &fl); 473 security_skb_classify_flow(skb, &fl);
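
The flowi initialiser above moves from nested union designators (.nl_u.ip4_u...) to flat .fl4_dst/.fl4_src/.fl_ip_* names. One way such flat names can coexist with the old union, shown here on a toy stand-in rather than the kernel's struct flowi, is to make them macro aliases into the union:

#include <stdint.h>
#include <stdio.h>

struct toy_flowi {
	union {
		struct { uint32_t daddr, saddr; uint8_t tos; }   ip4_u;
		struct { uint16_t daddr, saddr; uint8_t scope; } dn_u;
	} nl_u;
};

/* Hypothetical aliases; the real definitions live in the kernel's flow headers. */
#define fl4_dst nl_u.ip4_u.daddr
#define fl4_src nl_u.ip4_u.saddr

int main(void)
{
	struct toy_flowi fl = { .fl4_dst = 0x0a000001, .fl4_src = 0x0a000002 };

	printf("dst=%x src=%x\n", (unsigned)fl.fl4_dst, (unsigned)fl.fl4_src);
	return 0;
}
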
diff --git a/net/dccp/options.c b/net/dccp/options.c
index cd306181300..f06ffcfc8d7 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -54,7 +54,6 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
54 struct dccp_sock *dp = dccp_sk(sk); 54 struct dccp_sock *dp = dccp_sk(sk);
55 const struct dccp_hdr *dh = dccp_hdr(skb); 55 const struct dccp_hdr *dh = dccp_hdr(skb);
56 const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type; 56 const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type;
57 u64 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
58 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb); 57 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
59 unsigned char *opt_ptr = options; 58 unsigned char *opt_ptr = options;
60 const unsigned char *opt_end = (unsigned char *)dh + 59 const unsigned char *opt_end = (unsigned char *)dh +
@@ -129,14 +128,6 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
129 if (rc) 128 if (rc)
130 goto out_featneg_failed; 129 goto out_featneg_failed;
131 break; 130 break;
132 case DCCPO_ACK_VECTOR_0:
133 case DCCPO_ACK_VECTOR_1:
134 if (dccp_packet_without_ack(skb)) /* RFC 4340, 11.4 */
135 break;
136 if (dp->dccps_hc_rx_ackvec != NULL &&
137 dccp_ackvec_parse(sk, skb, &ackno, opt, value, len))
138 goto out_invalid_option;
139 break;
140 case DCCPO_TIMESTAMP: 131 case DCCPO_TIMESTAMP:
141 if (len != 4) 132 if (len != 4)
142 goto out_invalid_option; 133 goto out_invalid_option;
@@ -226,6 +217,16 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
226 pkt_type, opt, value, len)) 217 pkt_type, opt, value, len))
227 goto out_invalid_option; 218 goto out_invalid_option;
228 break; 219 break;
220 case DCCPO_ACK_VECTOR_0:
221 case DCCPO_ACK_VECTOR_1:
222 if (dccp_packet_without_ack(skb)) /* RFC 4340, 11.4 */
223 break;
224 /*
225 * Ack vectors are processed by the TX CCID if it is
226 * interested. The RX CCID need not parse Ack Vectors,
227 * since it is only interested in clearing old state.
228 * Fall through.
229 */
229 case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC: 230 case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC:
230 if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk, 231 if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk,
231 pkt_type, opt, value, len)) 232 pkt_type, opt, value, len))
@@ -340,6 +341,7 @@ static inline int dccp_elapsed_time_len(const u32 elapsed_time)
340 return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4; 341 return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
341} 342}
342 343
344/* FIXME: This function is currently not used anywhere */
343int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time) 345int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time)
344{ 346{
345 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time); 347 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
@@ -424,6 +426,83 @@ static int dccp_insert_option_timestamp_echo(struct dccp_sock *dp,
424 return 0; 426 return 0;
425} 427}
426 428
429static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
430{
431 struct dccp_sock *dp = dccp_sk(sk);
432 struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
433 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
434 const u16 buflen = dccp_ackvec_buflen(av);
 435	/* Figure out how many options we need to represent the ackvec */
436 const u8 nr_opts = DIV_ROUND_UP(buflen, DCCP_SINGLE_OPT_MAXLEN);
437 u16 len = buflen + 2 * nr_opts;
438 u8 i, nonce = 0;
439 const unsigned char *tail, *from;
440 unsigned char *to;
441
442 if (dcb->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) {
443 DCCP_WARN("Lacking space for %u bytes on %s packet\n", len,
444 dccp_packet_name(dcb->dccpd_type));
445 return -1;
446 }
447 /*
 448	 * Since Ack Vectors are variable-length, we cannot always predict
449 * their size. To catch exception cases where the space is running out
450 * on the skb, a separate Sync is scheduled to carry the Ack Vector.
451 */
452 if (len > DCCPAV_MIN_OPTLEN &&
453 len + dcb->dccpd_opt_len + skb->len > dp->dccps_mss_cache) {
454 DCCP_WARN("No space left for Ack Vector (%u) on skb (%u+%u), "
455 "MPS=%u ==> reduce payload size?\n", len, skb->len,
456 dcb->dccpd_opt_len, dp->dccps_mss_cache);
457 dp->dccps_sync_scheduled = 1;
458 return 0;
459 }
460 dcb->dccpd_opt_len += len;
461
462 to = skb_push(skb, len);
463 len = buflen;
464 from = av->av_buf + av->av_buf_head;
465 tail = av->av_buf + DCCPAV_MAX_ACKVEC_LEN;
466
467 for (i = 0; i < nr_opts; ++i) {
468 int copylen = len;
469
470 if (len > DCCP_SINGLE_OPT_MAXLEN)
471 copylen = DCCP_SINGLE_OPT_MAXLEN;
472
473 /*
474 * RFC 4340, 12.2: Encode the Nonce Echo for this Ack Vector via
475 * its type; ack_nonce is the sum of all individual buf_nonce's.
476 */
477 nonce ^= av->av_buf_nonce[i];
478
479 *to++ = DCCPO_ACK_VECTOR_0 + av->av_buf_nonce[i];
480 *to++ = copylen + 2;
481
482 /* Check if buf_head wraps */
483 if (from + copylen > tail) {
484 const u16 tailsize = tail - from;
485
486 memcpy(to, from, tailsize);
487 to += tailsize;
488 len -= tailsize;
489 copylen -= tailsize;
490 from = av->av_buf;
491 }
492
493 memcpy(to, from, copylen);
494 from += copylen;
495 to += copylen;
496 len -= copylen;
497 }
498 /*
499 * Each sent Ack Vector is recorded in the list, as per A.2 of RFC 4340.
500 */
501 if (dccp_ackvec_update_records(av, dcb->dccpd_seq, nonce))
502 return -ENOBUFS;
503 return 0;
504}
505
427/** 506/**
428 * dccp_insert_option_mandatory - Mandatory option (5.8.2) 507 * dccp_insert_option_mandatory - Mandatory option (5.8.2)
429 * Note that since we are using skb_push, this function needs to be called 508 * Note that since we are using skb_push, this function needs to be called
@@ -519,8 +598,7 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
519 if (dccp_insert_option_timestamp(skb)) 598 if (dccp_insert_option_timestamp(skb))
520 return -1; 599 return -1;
521 600
522 } else if (dp->dccps_hc_rx_ackvec != NULL && 601 } else if (dccp_ackvec_pending(sk) &&
523 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec) &&
524 dccp_insert_option_ackvec(sk, skb)) { 602 dccp_insert_option_ackvec(sk, skb)) {
525 return -1; 603 return -1;
526 } 604 }
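
The new dccp_insert_option_ackvec() above copies the live region of the circular Ack Vector buffer into one or more options of at most DCCP_SINGLE_OPT_MAXLEN value bytes, handling the wrap at the end of the buffer. A userspace sketch of that copy loop, with illustrative buffer size and chunk limit:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUF_LEN        16
#define SINGLE_OPT_MAX  6    /* stand-in for DCCP_SINGLE_OPT_MAXLEN */

/* Copy 'live' bytes starting at 'head' out of a circular buffer in chunks. */
static size_t emit_chunks(const uint8_t *buf, size_t head, size_t live, uint8_t *out)
{
	const uint8_t *from = buf + head, *tail = buf + BUF_LEN;
	size_t emitted = 0;

	while (live) {
		size_t copylen = live > SINGLE_OPT_MAX ? SINGLE_OPT_MAX : live;
		size_t chunk   = copylen;

		if (from + copylen > tail) {           /* live region wraps */
			size_t tailsize = tail - from;

			memcpy(out + emitted, from, tailsize);
			emitted += tailsize;
			copylen -= tailsize;
			from     = buf;
		}
		memcpy(out + emitted, from, copylen);
		emitted += copylen;
		from    += copylen;
		live    -= chunk;
	}
	return emitted;
}

int main(void)
{
	uint8_t buf[BUF_LEN], out[BUF_LEN];
	size_t i, n;

	for (i = 0; i < BUF_LEN; i++)
		buf[i] = (uint8_t)i;

	/* 10 live bytes starting at index 12: wraps from 12..15 into 0..5 */
	n = emit_chunks(buf, 12, 10, out);
	for (i = 0; i < n; i++)
		printf("%u ", (unsigned)out[i]);
	printf("\n");
	return 0;
}
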
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 45b91853f5a..784d3021054 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -242,7 +242,7 @@ static void dccp_xmit_packet(struct sock *sk)
242{ 242{
243 int err, len; 243 int err, len;
244 struct dccp_sock *dp = dccp_sk(sk); 244 struct dccp_sock *dp = dccp_sk(sk);
245 struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue); 245 struct sk_buff *skb = dccp_qpolicy_pop(sk);
246 246
247 if (unlikely(skb == NULL)) 247 if (unlikely(skb == NULL))
248 return; 248 return;
@@ -283,6 +283,15 @@ static void dccp_xmit_packet(struct sock *sk)
283 * any local drop will eventually be reported via receiver feedback. 283 * any local drop will eventually be reported via receiver feedback.
284 */ 284 */
285 ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len); 285 ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
286
287 /*
288 * If the CCID needs to transfer additional header options out-of-band
289 * (e.g. Ack Vectors or feature-negotiation options), it activates this
290 * flag to schedule a Sync. The Sync will automatically incorporate all
291 * currently pending header options, thus clearing the backlog.
292 */
293 if (dp->dccps_sync_scheduled)
294 dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
286} 295}
287 296
288/** 297/**
@@ -336,7 +345,7 @@ void dccp_write_xmit(struct sock *sk)
336 struct dccp_sock *dp = dccp_sk(sk); 345 struct dccp_sock *dp = dccp_sk(sk);
337 struct sk_buff *skb; 346 struct sk_buff *skb;
338 347
339 while ((skb = skb_peek(&sk->sk_write_queue))) { 348 while ((skb = dccp_qpolicy_top(sk))) {
340 int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); 349 int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
341 350
342 switch (ccid_packet_dequeue_eval(rc)) { 351 switch (ccid_packet_dequeue_eval(rc)) {
@@ -350,8 +359,7 @@ void dccp_write_xmit(struct sock *sk)
350 dccp_xmit_packet(sk); 359 dccp_xmit_packet(sk);
351 break; 360 break;
352 case CCID_PACKET_ERR: 361 case CCID_PACKET_ERR:
353 skb_dequeue(&sk->sk_write_queue); 362 dccp_qpolicy_drop(sk, skb);
354 kfree_skb(skb);
355 dccp_pr_debug("packet discarded due to err=%d\n", rc); 363 dccp_pr_debug("packet discarded due to err=%d\n", rc);
356 } 364 }
357 } 365 }
@@ -636,6 +644,12 @@ void dccp_send_sync(struct sock *sk, const u64 ackno,
636 DCCP_SKB_CB(skb)->dccpd_type = pkt_type; 644 DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
637 DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno; 645 DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;
638 646
647 /*
648 * Clear the flag in case the Sync was scheduled for out-of-band data,
649 * such as carrying a long Ack Vector.
650 */
651 dccp_sk(sk)->dccps_sync_scheduled = 0;
652
639 dccp_transmit_skb(sk, skb); 653 dccp_transmit_skb(sk, skb);
640} 654}
641 655
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index ef343d53fce..152975d942d 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -185,6 +185,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
185 dp->dccps_role = DCCP_ROLE_UNDEFINED; 185 dp->dccps_role = DCCP_ROLE_UNDEFINED;
186 dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT; 186 dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT;
187 dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1; 187 dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;
188 dp->dccps_tx_qlen = sysctl_dccp_tx_qlen;
188 189
189 dccp_init_xmit_timers(sk); 190 dccp_init_xmit_timers(sk);
190 191
@@ -532,6 +533,20 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
532 case DCCP_SOCKOPT_RECV_CSCOV: 533 case DCCP_SOCKOPT_RECV_CSCOV:
533 err = dccp_setsockopt_cscov(sk, val, true); 534 err = dccp_setsockopt_cscov(sk, val, true);
534 break; 535 break;
536 case DCCP_SOCKOPT_QPOLICY_ID:
537 if (sk->sk_state != DCCP_CLOSED)
538 err = -EISCONN;
539 else if (val < 0 || val >= DCCPQ_POLICY_MAX)
540 err = -EINVAL;
541 else
542 dp->dccps_qpolicy = val;
543 break;
544 case DCCP_SOCKOPT_QPOLICY_TXQLEN:
545 if (val < 0)
546 err = -EINVAL;
547 else
548 dp->dccps_tx_qlen = val;
549 break;
535 default: 550 default:
536 err = -ENOPROTOOPT; 551 err = -ENOPROTOOPT;
537 break; 552 break;
@@ -639,6 +654,12 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
639 case DCCP_SOCKOPT_RECV_CSCOV: 654 case DCCP_SOCKOPT_RECV_CSCOV:
640 val = dp->dccps_pcrlen; 655 val = dp->dccps_pcrlen;
641 break; 656 break;
657 case DCCP_SOCKOPT_QPOLICY_ID:
658 val = dp->dccps_qpolicy;
659 break;
660 case DCCP_SOCKOPT_QPOLICY_TXQLEN:
661 val = dp->dccps_tx_qlen;
662 break;
642 case 128 ... 191: 663 case 128 ... 191:
643 return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, 664 return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
644 len, (u32 __user *)optval, optlen); 665 len, (u32 __user *)optval, optlen);
@@ -681,6 +702,47 @@ int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
681EXPORT_SYMBOL_GPL(compat_dccp_getsockopt); 702EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
682#endif 703#endif
683 704
705static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
706{
707 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
708
709 /*
710 * Assign an (opaque) qpolicy priority value to skb->priority.
711 *
 712	 * We are overloading this skb field for use with the qpolicy subsystem.
713 * The skb->priority is normally used for the SO_PRIORITY option, which
714 * is initialised from sk_priority. Since the assignment of sk_priority
715 * to skb->priority happens later (on layer 3), we overload this field
716 * for use with queueing priorities as long as the skb is on layer 4.
717 * The default priority value (if nothing is set) is 0.
718 */
719 skb->priority = 0;
720
721 for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
722
723 if (!CMSG_OK(msg, cmsg))
724 return -EINVAL;
725
726 if (cmsg->cmsg_level != SOL_DCCP)
727 continue;
728
729 if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
730 !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
731 return -EINVAL;
732
733 switch (cmsg->cmsg_type) {
734 case DCCP_SCM_PRIORITY:
735 if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
736 return -EINVAL;
737 skb->priority = *(__u32 *)CMSG_DATA(cmsg);
738 break;
739 default:
740 return -EINVAL;
741 }
742 }
743 return 0;
744}
745
684int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 746int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
685 size_t len) 747 size_t len)
686{ 748{
@@ -696,8 +758,7 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
696 758
697 lock_sock(sk); 759 lock_sock(sk);
698 760
699 if (sysctl_dccp_tx_qlen && 761 if (dccp_qpolicy_full(sk)) {
700 (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
701 rc = -EAGAIN; 762 rc = -EAGAIN;
702 goto out_release; 763 goto out_release;
703 } 764 }
@@ -725,7 +786,11 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
725 if (rc != 0) 786 if (rc != 0)
726 goto out_discard; 787 goto out_discard;
727 788
728 skb_queue_tail(&sk->sk_write_queue, skb); 789 rc = dccp_msghdr_parse(msg, skb);
790 if (rc != 0)
791 goto out_discard;
792
793 dccp_qpolicy_push(sk, skb);
729 /* 794 /*
730 * The xmit_timer is set if the TX CCID is rate-based and will expire 795 * The xmit_timer is set if the TX CCID is rate-based and will expire
731 * when congestion control permits to release further packets into the 796 * when congestion control permits to release further packets into the
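
The dccp_sendmsg()/setsockopt() changes above expose the queueing policies to userspace: DCCP_SOCKOPT_QPOLICY_ID selects a policy (only while the socket is still closed, otherwise EISCONN), DCCP_SOCKOPT_QPOLICY_TXQLEN bounds the write queue, and a SOL_DCCP/DCCP_SCM_PRIORITY cmsg tags an individual packet. A hypothetical usage sketch, assuming these constants are exported to userspace by this series via <linux/dccp.h> and <sys/socket.h>:

#include <linux/dccp.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Select the priority policy and a queue bound; call before connecting. */
int use_prio_policy(int sk)
{
	int policy = DCCPQ_POLICY_PRIO, qlen = 16;

	if (setsockopt(sk, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID,
		       &policy, sizeof(policy)) < 0)
		return -1;
	return setsockopt(sk, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_TXQLEN,
			  &qlen, sizeof(qlen));
}

/* Send one payload tagged with a per-packet priority via a cmsg. */
ssize_t send_with_prio(int sk, const void *data, size_t len, uint32_t prio)
{
	char cbuf[CMSG_SPACE(sizeof(prio))];
	struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_DCCP;
	cmsg->cmsg_type  = DCCP_SCM_PRIORITY;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(prio));
	memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));

	return sendmsg(sk, &msg, 0);
}
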
diff --git a/net/dccp/qpolicy.c b/net/dccp/qpolicy.c
new file mode 100644
index 00000000000..63c30bfa470
--- /dev/null
+++ b/net/dccp/qpolicy.c
@@ -0,0 +1,137 @@
1/*
2 * net/dccp/qpolicy.c
3 *
4 * Policy-based packet dequeueing interface for DCCP.
5 *
6 * Copyright (c) 2008 Tomasz Grobelny <tomasz@grobelny.oswiecenia.net>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License v2
10 * as published by the Free Software Foundation.
11 */
12#include "dccp.h"
13
14/*
15 * Simple Dequeueing Policy:
16 * If tx_qlen is different from 0, enqueue up to tx_qlen elements.
17 */
18static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb)
19{
20 skb_queue_tail(&sk->sk_write_queue, skb);
21}
22
23static bool qpolicy_simple_full(struct sock *sk)
24{
25 return dccp_sk(sk)->dccps_tx_qlen &&
26 sk->sk_write_queue.qlen >= dccp_sk(sk)->dccps_tx_qlen;
27}
28
29static struct sk_buff *qpolicy_simple_top(struct sock *sk)
30{
31 return skb_peek(&sk->sk_write_queue);
32}
33
34/*
35 * Priority-based Dequeueing Policy:
36 * If tx_qlen is different from 0 and the queue has reached its upper bound
37 * of tx_qlen elements, replace older packets lowest-priority-first.
38 */
39static struct sk_buff *qpolicy_prio_best_skb(struct sock *sk)
40{
41 struct sk_buff *skb, *best = NULL;
42
43 skb_queue_walk(&sk->sk_write_queue, skb)
44 if (best == NULL || skb->priority > best->priority)
45 best = skb;
46 return best;
47}
48
49static struct sk_buff *qpolicy_prio_worst_skb(struct sock *sk)
50{
51 struct sk_buff *skb, *worst = NULL;
52
53 skb_queue_walk(&sk->sk_write_queue, skb)
54 if (worst == NULL || skb->priority < worst->priority)
55 worst = skb;
56 return worst;
57}
58
59static bool qpolicy_prio_full(struct sock *sk)
60{
61 if (qpolicy_simple_full(sk))
62 dccp_qpolicy_drop(sk, qpolicy_prio_worst_skb(sk));
63 return false;
64}
65
66/**
67 * struct dccp_qpolicy_operations - TX Packet Dequeueing Interface
68 * @push: add a new @skb to the write queue
69 * @full: indicates that no more packets will be admitted
70 * @top: peeks at whatever the queueing policy defines as its `top'
71 */
72static struct dccp_qpolicy_operations {
73 void (*push) (struct sock *sk, struct sk_buff *skb);
74 bool (*full) (struct sock *sk);
75 struct sk_buff* (*top) (struct sock *sk);
76 __be32 params;
77
78} qpol_table[DCCPQ_POLICY_MAX] = {
79 [DCCPQ_POLICY_SIMPLE] = {
80 .push = qpolicy_simple_push,
81 .full = qpolicy_simple_full,
82 .top = qpolicy_simple_top,
83 .params = 0,
84 },
85 [DCCPQ_POLICY_PRIO] = {
86 .push = qpolicy_simple_push,
87 .full = qpolicy_prio_full,
88 .top = qpolicy_prio_best_skb,
89 .params = DCCP_SCM_PRIORITY,
90 },
91};
92
93/*
94 * Externally visible interface
95 */
96void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb)
97{
98 qpol_table[dccp_sk(sk)->dccps_qpolicy].push(sk, skb);
99}
100
101bool dccp_qpolicy_full(struct sock *sk)
102{
103 return qpol_table[dccp_sk(sk)->dccps_qpolicy].full(sk);
104}
105
106void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb)
107{
108 if (skb != NULL) {
109 skb_unlink(skb, &sk->sk_write_queue);
110 kfree_skb(skb);
111 }
112}
113
114struct sk_buff *dccp_qpolicy_top(struct sock *sk)
115{
116 return qpol_table[dccp_sk(sk)->dccps_qpolicy].top(sk);
117}
118
119struct sk_buff *dccp_qpolicy_pop(struct sock *sk)
120{
121 struct sk_buff *skb = dccp_qpolicy_top(sk);
122
123 if (skb != NULL) {
124 /* Clear any skb fields that we used internally */
125 skb->priority = 0;
126 skb_unlink(skb, &sk->sk_write_queue);
127 }
128 return skb;
129}
130
131bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param)
132{
133 /* check if exactly one bit is set */
134 if (!param || (param & (param - 1)))
135 return false;
136 return (qpol_table[dccp_sk(sk)->dccps_qpolicy].params & param) == param;
137}
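
Two idioms from qpolicy.c above are worth isolating: the dispatch table indexed by the socket's policy id, and the "exactly one bit set" test in dccp_qpolicy_param_ok(). A standalone illustration with toy types, not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { POLICY_SIMPLE, POLICY_PRIO, POLICY_MAX };

struct policy_ops {
	const char *name;
	uint32_t    params;    /* bitmask of cmsg parameters the policy accepts */
};

static const struct policy_ops pol_table[POLICY_MAX] = {
	[POLICY_SIMPLE] = { .name = "simple", .params = 0 },
	[POLICY_PRIO]   = { .name = "prio",   .params = 1 },  /* e.g. a priority parameter */
};

static bool param_ok(int policy, uint32_t param)
{
	if (!param || (param & (param - 1)))    /* reject 0 and multi-bit values */
		return false;
	return (pol_table[policy].params & param) == param;
}

int main(void)
{
	printf("%s accepts 1: %d\n", pol_table[POLICY_PRIO].name,   param_ok(POLICY_PRIO, 1));
	printf("%s accepts 1: %d\n", pol_table[POLICY_SIMPLE].name, param_ok(POLICY_SIMPLE, 1));
	printf("prio accepts 3: %d\n", param_ok(POLICY_PRIO, 3));   /* two bits set -> rejected */
	return 0;
}
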
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 6f97268ed85..0065e7e14af 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1850,7 +1850,7 @@ unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
1850{ 1850{
1851 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER; 1851 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
1852 if (dev) { 1852 if (dev) {
1853 struct dn_dev *dn_db = dev->dn_ptr; 1853 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1854 mtu -= LL_RESERVED_SPACE(dev); 1854 mtu -= LL_RESERVED_SPACE(dev);
1855 if (dn_db->use_long) 1855 if (dn_db->use_long)
1856 mtu -= 21; 1856 mtu -= 21;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 4c409b46aa3..0ba15633c41 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -267,7 +267,7 @@ static int dn_forwarding_proc(ctl_table *table, int write,
267 if (table->extra1 == NULL) 267 if (table->extra1 == NULL)
268 return -EINVAL; 268 return -EINVAL;
269 269
270 dn_db = dev->dn_ptr; 270 dn_db = rcu_dereference_raw(dev->dn_ptr);
271 old = dn_db->parms.forwarding; 271 old = dn_db->parms.forwarding;
272 272
273 err = proc_dointvec(table, write, buffer, lenp, ppos); 273 err = proc_dointvec(table, write, buffer, lenp, ppos);
@@ -332,14 +332,19 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void)
332 return ifa; 332 return ifa;
333} 333}
334 334
335static __inline__ void dn_dev_free_ifa(struct dn_ifaddr *ifa) 335static void dn_dev_free_ifa_rcu(struct rcu_head *head)
336{ 336{
337 kfree(ifa); 337 kfree(container_of(head, struct dn_ifaddr, rcu));
338} 338}
339 339
340static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int destroy) 340static void dn_dev_free_ifa(struct dn_ifaddr *ifa)
341{ 341{
342 struct dn_ifaddr *ifa1 = *ifap; 342 call_rcu(&ifa->rcu, dn_dev_free_ifa_rcu);
343}
344
345static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy)
346{
347 struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap);
343 unsigned char mac_addr[6]; 348 unsigned char mac_addr[6];
344 struct net_device *dev = dn_db->dev; 349 struct net_device *dev = dn_db->dev;
345 350
@@ -373,7 +378,9 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
373 ASSERT_RTNL(); 378 ASSERT_RTNL();
374 379
375 /* Check for duplicates */ 380 /* Check for duplicates */
376 for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) { 381 for (ifa1 = rtnl_dereference(dn_db->ifa_list);
382 ifa1 != NULL;
383 ifa1 = rtnl_dereference(ifa1->ifa_next)) {
377 if (ifa1->ifa_local == ifa->ifa_local) 384 if (ifa1->ifa_local == ifa->ifa_local)
378 return -EEXIST; 385 return -EEXIST;
379 } 386 }
@@ -386,7 +393,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
386 } 393 }
387 394
388 ifa->ifa_next = dn_db->ifa_list; 395 ifa->ifa_next = dn_db->ifa_list;
389 dn_db->ifa_list = ifa; 396 rcu_assign_pointer(dn_db->ifa_list, ifa);
390 397
391 dn_ifaddr_notify(RTM_NEWADDR, ifa); 398 dn_ifaddr_notify(RTM_NEWADDR, ifa);
392 blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); 399 blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -396,7 +403,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
396 403
397static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa) 404static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
398{ 405{
399 struct dn_dev *dn_db = dev->dn_ptr; 406 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
400 int rv; 407 int rv;
401 408
402 if (dn_db == NULL) { 409 if (dn_db == NULL) {
@@ -425,7 +432,8 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
425 struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr; 432 struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
426 struct dn_dev *dn_db; 433 struct dn_dev *dn_db;
427 struct net_device *dev; 434 struct net_device *dev;
428 struct dn_ifaddr *ifa = NULL, **ifap = NULL; 435 struct dn_ifaddr *ifa = NULL;
436 struct dn_ifaddr __rcu **ifap = NULL;
429 int ret = 0; 437 int ret = 0;
430 438
431 if (copy_from_user(ifr, arg, DN_IFREQ_SIZE)) 439 if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
@@ -454,8 +462,10 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
454 goto done; 462 goto done;
455 } 463 }
456 464
457 if ((dn_db = dev->dn_ptr) != NULL) { 465 if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) {
458 for (ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next) 466 for (ifap = &dn_db->ifa_list;
467 (ifa = rtnl_dereference(*ifap)) != NULL;
468 ifap = &ifa->ifa_next)
459 if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0) 469 if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
460 break; 470 break;
461 } 471 }
@@ -558,7 +568,7 @@ static struct dn_dev *dn_dev_by_index(int ifindex)
558 568
559 dev = __dev_get_by_index(&init_net, ifindex); 569 dev = __dev_get_by_index(&init_net, ifindex);
560 if (dev) 570 if (dev)
561 dn_dev = dev->dn_ptr; 571 dn_dev = rtnl_dereference(dev->dn_ptr);
562 572
563 return dn_dev; 573 return dn_dev;
564} 574}
@@ -576,7 +586,8 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
576 struct nlattr *tb[IFA_MAX+1]; 586 struct nlattr *tb[IFA_MAX+1];
577 struct dn_dev *dn_db; 587 struct dn_dev *dn_db;
578 struct ifaddrmsg *ifm; 588 struct ifaddrmsg *ifm;
579 struct dn_ifaddr *ifa, **ifap; 589 struct dn_ifaddr *ifa;
590 struct dn_ifaddr __rcu **ifap;
580 int err = -EINVAL; 591 int err = -EINVAL;
581 592
582 if (!net_eq(net, &init_net)) 593 if (!net_eq(net, &init_net))
@@ -592,7 +603,9 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
592 goto errout; 603 goto errout;
593 604
594 err = -EADDRNOTAVAIL; 605 err = -EADDRNOTAVAIL;
595 for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) { 606 for (ifap = &dn_db->ifa_list;
607 (ifa = rtnl_dereference(*ifap)) != NULL;
608 ifap = &ifa->ifa_next) {
596 if (tb[IFA_LOCAL] && 609 if (tb[IFA_LOCAL] &&
597 nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) 610 nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
598 continue; 611 continue;
@@ -632,7 +645,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
632 if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL) 645 if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL)
633 return -ENODEV; 646 return -ENODEV;
634 647
635 if ((dn_db = dev->dn_ptr) == NULL) { 648 if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) {
636 dn_db = dn_dev_create(dev, &err); 649 dn_db = dn_dev_create(dev, &err);
637 if (!dn_db) 650 if (!dn_db)
638 return err; 651 return err;
@@ -748,11 +761,11 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
748 skip_naddr = 0; 761 skip_naddr = 0;
749 } 762 }
750 763
751 if ((dn_db = dev->dn_ptr) == NULL) 764 if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL)
752 goto cont; 765 goto cont;
753 766
754 for (ifa = dn_db->ifa_list, dn_idx = 0; ifa; 767 for (ifa = rtnl_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
755 ifa = ifa->ifa_next, dn_idx++) { 768 ifa = rtnl_dereference(ifa->ifa_next), dn_idx++) {
756 if (dn_idx < skip_naddr) 769 if (dn_idx < skip_naddr)
757 continue; 770 continue;
758 771
@@ -773,21 +786,22 @@ done:
773 786
774static int dn_dev_get_first(struct net_device *dev, __le16 *addr) 787static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
775{ 788{
776 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 789 struct dn_dev *dn_db;
777 struct dn_ifaddr *ifa; 790 struct dn_ifaddr *ifa;
778 int rv = -ENODEV; 791 int rv = -ENODEV;
779 792
793 rcu_read_lock();
794 dn_db = rcu_dereference(dev->dn_ptr);
780 if (dn_db == NULL) 795 if (dn_db == NULL)
781 goto out; 796 goto out;
782 797
783 rtnl_lock(); 798 ifa = rcu_dereference(dn_db->ifa_list);
784 ifa = dn_db->ifa_list;
785 if (ifa != NULL) { 799 if (ifa != NULL) {
786 *addr = ifa->ifa_local; 800 *addr = ifa->ifa_local;
787 rv = 0; 801 rv = 0;
788 } 802 }
789 rtnl_unlock();
790out: 803out:
804 rcu_read_unlock();
791 return rv; 805 return rv;
792} 806}
793 807
@@ -823,7 +837,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
823 struct endnode_hello_message *msg; 837 struct endnode_hello_message *msg;
824 struct sk_buff *skb = NULL; 838 struct sk_buff *skb = NULL;
825 __le16 *pktlen; 839 __le16 *pktlen;
826 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 840 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
827 841
828 if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL) 842 if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
829 return; 843 return;
@@ -889,7 +903,7 @@ static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn
889static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa) 903static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
890{ 904{
891 int n; 905 int n;
892 struct dn_dev *dn_db = dev->dn_ptr; 906 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
893 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; 907 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
894 struct sk_buff *skb; 908 struct sk_buff *skb;
895 size_t size; 909 size_t size;
@@ -960,7 +974,7 @@ static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
960 974
961static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa) 975static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
962{ 976{
963 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 977 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
964 978
965 if (dn_db->parms.forwarding == 0) 979 if (dn_db->parms.forwarding == 0)
966 dn_send_endnode_hello(dev, ifa); 980 dn_send_endnode_hello(dev, ifa);
@@ -998,7 +1012,7 @@ static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
998 1012
999static int dn_eth_up(struct net_device *dev) 1013static int dn_eth_up(struct net_device *dev)
1000{ 1014{
1001 struct dn_dev *dn_db = dev->dn_ptr; 1015 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1002 1016
1003 if (dn_db->parms.forwarding == 0) 1017 if (dn_db->parms.forwarding == 0)
1004 dev_mc_add(dev, dn_rt_all_end_mcast); 1018 dev_mc_add(dev, dn_rt_all_end_mcast);
@@ -1012,7 +1026,7 @@ static int dn_eth_up(struct net_device *dev)
1012 1026
1013static void dn_eth_down(struct net_device *dev) 1027static void dn_eth_down(struct net_device *dev)
1014{ 1028{
1015 struct dn_dev *dn_db = dev->dn_ptr; 1029 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1016 1030
1017 if (dn_db->parms.forwarding == 0) 1031 if (dn_db->parms.forwarding == 0)
1018 dev_mc_del(dev, dn_rt_all_end_mcast); 1032 dev_mc_del(dev, dn_rt_all_end_mcast);
@@ -1025,12 +1039,16 @@ static void dn_dev_set_timer(struct net_device *dev);
1025static void dn_dev_timer_func(unsigned long arg) 1039static void dn_dev_timer_func(unsigned long arg)
1026{ 1040{
1027 struct net_device *dev = (struct net_device *)arg; 1041 struct net_device *dev = (struct net_device *)arg;
1028 struct dn_dev *dn_db = dev->dn_ptr; 1042 struct dn_dev *dn_db;
1029 struct dn_ifaddr *ifa; 1043 struct dn_ifaddr *ifa;
1030 1044
1045 rcu_read_lock();
1046 dn_db = rcu_dereference(dev->dn_ptr);
1031 if (dn_db->t3 <= dn_db->parms.t2) { 1047 if (dn_db->t3 <= dn_db->parms.t2) {
1032 if (dn_db->parms.timer3) { 1048 if (dn_db->parms.timer3) {
1033 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { 1049 for (ifa = rcu_dereference(dn_db->ifa_list);
1050 ifa;
1051 ifa = rcu_dereference(ifa->ifa_next)) {
1034 if (!(ifa->ifa_flags & IFA_F_SECONDARY)) 1052 if (!(ifa->ifa_flags & IFA_F_SECONDARY))
1035 dn_db->parms.timer3(dev, ifa); 1053 dn_db->parms.timer3(dev, ifa);
1036 } 1054 }
@@ -1039,13 +1057,13 @@ static void dn_dev_timer_func(unsigned long arg)
1039 } else { 1057 } else {
1040 dn_db->t3 -= dn_db->parms.t2; 1058 dn_db->t3 -= dn_db->parms.t2;
1041 } 1059 }
1042 1060 rcu_read_unlock();
1043 dn_dev_set_timer(dev); 1061 dn_dev_set_timer(dev);
1044} 1062}
1045 1063
1046static void dn_dev_set_timer(struct net_device *dev) 1064static void dn_dev_set_timer(struct net_device *dev)
1047{ 1065{
1048 struct dn_dev *dn_db = dev->dn_ptr; 1066 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1049 1067
1050 if (dn_db->parms.t2 > dn_db->parms.t3) 1068 if (dn_db->parms.t2 > dn_db->parms.t3)
1051 dn_db->parms.t2 = dn_db->parms.t3; 1069 dn_db->parms.t2 = dn_db->parms.t3;
@@ -1077,8 +1095,8 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1077 return NULL; 1095 return NULL;
1078 1096
1079 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); 1097 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
1080 smp_wmb(); 1098
1081 dev->dn_ptr = dn_db; 1099 rcu_assign_pointer(dev->dn_ptr, dn_db);
1082 dn_db->dev = dev; 1100 dn_db->dev = dev;
1083 init_timer(&dn_db->timer); 1101 init_timer(&dn_db->timer);
1084 1102
@@ -1086,7 +1104,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1086 1104
1087 dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table); 1105 dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
1088 if (!dn_db->neigh_parms) { 1106 if (!dn_db->neigh_parms) {
1089 dev->dn_ptr = NULL; 1107 rcu_assign_pointer(dev->dn_ptr, NULL);
1090 kfree(dn_db); 1108 kfree(dn_db);
1091 return NULL; 1109 return NULL;
1092 } 1110 }
@@ -1125,7 +1143,7 @@ void dn_dev_up(struct net_device *dev)
1125 struct dn_ifaddr *ifa; 1143 struct dn_ifaddr *ifa;
1126 __le16 addr = decnet_address; 1144 __le16 addr = decnet_address;
1127 int maybe_default = 0; 1145 int maybe_default = 0;
1128 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 1146 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
1129 1147
1130 if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) 1148 if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
1131 return; 1149 return;
@@ -1176,7 +1194,7 @@ void dn_dev_up(struct net_device *dev)
1176 1194
1177static void dn_dev_delete(struct net_device *dev) 1195static void dn_dev_delete(struct net_device *dev)
1178{ 1196{
1179 struct dn_dev *dn_db = dev->dn_ptr; 1197 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
1180 1198
1181 if (dn_db == NULL) 1199 if (dn_db == NULL)
1182 return; 1200 return;
@@ -1204,13 +1222,13 @@ static void dn_dev_delete(struct net_device *dev)
1204 1222
1205void dn_dev_down(struct net_device *dev) 1223void dn_dev_down(struct net_device *dev)
1206{ 1224{
1207 struct dn_dev *dn_db = dev->dn_ptr; 1225 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
1208 struct dn_ifaddr *ifa; 1226 struct dn_ifaddr *ifa;
1209 1227
1210 if (dn_db == NULL) 1228 if (dn_db == NULL)
1211 return; 1229 return;
1212 1230
1213 while((ifa = dn_db->ifa_list) != NULL) { 1231 while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) {
1214 dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0); 1232 dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
1215 dn_dev_free_ifa(ifa); 1233 dn_dev_free_ifa(ifa);
1216 } 1234 }
@@ -1270,7 +1288,7 @@ static inline int is_dn_dev(struct net_device *dev)
1270} 1288}
1271 1289
1272static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) 1290static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
1273 __acquires(rcu) 1291 __acquires(RCU)
1274{ 1292{
1275 int i; 1293 int i;
1276 struct net_device *dev; 1294 struct net_device *dev;
@@ -1313,7 +1331,7 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1313} 1331}
1314 1332
1315static void dn_dev_seq_stop(struct seq_file *seq, void *v) 1333static void dn_dev_seq_stop(struct seq_file *seq, void *v)
1316 __releases(rcu) 1334 __releases(RCU)
1317{ 1335{
1318 rcu_read_unlock(); 1336 rcu_read_unlock();
1319} 1337}
@@ -1340,7 +1358,7 @@ static int dn_dev_seq_show(struct seq_file *seq, void *v)
1340 struct net_device *dev = v; 1358 struct net_device *dev = v;
1341 char peer_buf[DN_ASCBUF_LEN]; 1359 char peer_buf[DN_ASCBUF_LEN];
1342 char router_buf[DN_ASCBUF_LEN]; 1360 char router_buf[DN_ASCBUF_LEN];
1343 struct dn_dev *dn_db = dev->dn_ptr; 1361 struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr);
1344 1362
1345 seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu" 1363 seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu"
1346 " %04hu %03d %02x %-10s %-7s %-7s\n", 1364 " %04hu %03d %02x %-10s %-7s %-7s\n",
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 4ab96c15166..0ef0a81bcd7 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -610,10 +610,12 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
610 /* Scan device list */ 610 /* Scan device list */
611 rcu_read_lock(); 611 rcu_read_lock();
612 for_each_netdev_rcu(&init_net, dev) { 612 for_each_netdev_rcu(&init_net, dev) {
613 dn_db = dev->dn_ptr; 613 dn_db = rcu_dereference(dev->dn_ptr);
614 if (dn_db == NULL) 614 if (dn_db == NULL)
615 continue; 615 continue;
616 for(ifa2 = dn_db->ifa_list; ifa2; ifa2 = ifa2->ifa_next) { 616 for (ifa2 = rcu_dereference(dn_db->ifa_list);
617 ifa2 != NULL;
618 ifa2 = rcu_dereference(ifa2->ifa_next)) {
617 if (ifa2->ifa_local == ifa->ifa_local) { 619 if (ifa2->ifa_local == ifa->ifa_local) {
618 found_it = 1; 620 found_it = 1;
619 break; 621 break;
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index a085dbcf5c7..602dade7e9a 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -391,7 +391,7 @@ int dn_neigh_router_hello(struct sk_buff *skb)
391 write_lock(&neigh->lock); 391 write_lock(&neigh->lock);
392 392
393 neigh->used = jiffies; 393 neigh->used = jiffies;
394 dn_db = (struct dn_dev *)neigh->dev->dn_ptr; 394 dn_db = rcu_dereference(neigh->dev->dn_ptr);
395 395
396 if (!(neigh->nud_state & NUD_PERMANENT)) { 396 if (!(neigh->nud_state & NUD_PERMANENT)) {
397 neigh->updated = jiffies; 397 neigh->updated = jiffies;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index df0f3e54ff8..8280e43c886 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -93,7 +93,7 @@
93 93
94struct dn_rt_hash_bucket 94struct dn_rt_hash_bucket
95{ 95{
96 struct dn_route *chain; 96 struct dn_route __rcu *chain;
97 spinlock_t lock; 97 spinlock_t lock;
98}; 98};
99 99
@@ -157,15 +157,17 @@ static inline void dnrt_drop(struct dn_route *rt)
157static void dn_dst_check_expire(unsigned long dummy) 157static void dn_dst_check_expire(unsigned long dummy)
158{ 158{
159 int i; 159 int i;
160 struct dn_route *rt, **rtp; 160 struct dn_route *rt;
161 struct dn_route __rcu **rtp;
161 unsigned long now = jiffies; 162 unsigned long now = jiffies;
162 unsigned long expire = 120 * HZ; 163 unsigned long expire = 120 * HZ;
163 164
164 for(i = 0; i <= dn_rt_hash_mask; i++) { 165 for (i = 0; i <= dn_rt_hash_mask; i++) {
165 rtp = &dn_rt_hash_table[i].chain; 166 rtp = &dn_rt_hash_table[i].chain;
166 167
167 spin_lock(&dn_rt_hash_table[i].lock); 168 spin_lock(&dn_rt_hash_table[i].lock);
168 while((rt=*rtp) != NULL) { 169 while ((rt = rcu_dereference_protected(*rtp,
170 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
169 if (atomic_read(&rt->dst.__refcnt) || 171 if (atomic_read(&rt->dst.__refcnt) ||
170 (now - rt->dst.lastuse) < expire) { 172 (now - rt->dst.lastuse) < expire) {
171 rtp = &rt->dst.dn_next; 173 rtp = &rt->dst.dn_next;
@@ -186,17 +188,19 @@ static void dn_dst_check_expire(unsigned long dummy)
186 188
187static int dn_dst_gc(struct dst_ops *ops) 189static int dn_dst_gc(struct dst_ops *ops)
188{ 190{
189 struct dn_route *rt, **rtp; 191 struct dn_route *rt;
192 struct dn_route __rcu **rtp;
190 int i; 193 int i;
191 unsigned long now = jiffies; 194 unsigned long now = jiffies;
192 unsigned long expire = 10 * HZ; 195 unsigned long expire = 10 * HZ;
193 196
194 for(i = 0; i <= dn_rt_hash_mask; i++) { 197 for (i = 0; i <= dn_rt_hash_mask; i++) {
195 198
196 spin_lock_bh(&dn_rt_hash_table[i].lock); 199 spin_lock_bh(&dn_rt_hash_table[i].lock);
197 rtp = &dn_rt_hash_table[i].chain; 200 rtp = &dn_rt_hash_table[i].chain;
198 201
199 while((rt=*rtp) != NULL) { 202 while ((rt = rcu_dereference_protected(*rtp,
203 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
200 if (atomic_read(&rt->dst.__refcnt) || 204 if (atomic_read(&rt->dst.__refcnt) ||
201 (now - rt->dst.lastuse) < expire) { 205 (now - rt->dst.lastuse) < expire) {
202 rtp = &rt->dst.dn_next; 206 rtp = &rt->dst.dn_next;
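
Both dn_dst_check_expire() and dn_dst_gc() above keep the same pointer-to-pointer walk after the __rcu annotation: holding the address of the previous "next" field lets the loop unlink an expired entry without tracking a separate prev pointer. A plain userspace sketch of that idiom (toy list, RCU and lockdep annotations omitted):

#include <stdio.h>
#include <stdlib.h>

struct node { int expired; struct node *next; };

static void expire(struct node **chain)
{
	struct node **np = chain, *n;

	while ((n = *np) != NULL) {
		if (!n->expired) {
			np = &n->next;    /* keep entry, advance to its next field */
			continue;
		}
		*np = n->next;            /* unlink and free the expired entry */
		free(n);
	}
}

int main(void)
{
	struct node *head = NULL, **tail = &head;
	int flags[] = { 0, 1, 1, 0, 1 };

	for (unsigned i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (n == NULL)
			return 1;
		n->expired = flags[i];
		*tail = n;
		tail = &n->next;
	}
	expire(&head);
	for (struct node *n = head; n; n = n->next)
		printf("kept node, expired=%d\n", n->expired);
	return 0;
}
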
@@ -227,7 +231,7 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
227{ 231{
228 u32 min_mtu = 230; 232 u32 min_mtu = 230;
229 struct dn_dev *dn = dst->neighbour ? 233 struct dn_dev *dn = dst->neighbour ?
230 (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL; 234 rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL;
231 235
232 if (dn && dn->use_long == 0) 236 if (dn && dn->use_long == 0)
233 min_mtu -= 6; 237 min_mtu -= 6;
@@ -267,23 +271,25 @@ static void dn_dst_link_failure(struct sk_buff *skb)
267 271
268static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 272static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
269{ 273{
270 return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) | 274 return ((fl1->fld_dst ^ fl2->fld_dst) |
271 (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) | 275 (fl1->fld_src ^ fl2->fld_src) |
272 (fl1->mark ^ fl2->mark) | 276 (fl1->mark ^ fl2->mark) |
273 (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) | 277 (fl1->fld_scope ^ fl2->fld_scope) |
274 (fl1->oif ^ fl2->oif) | 278 (fl1->oif ^ fl2->oif) |
275 (fl1->iif ^ fl2->iif)) == 0; 279 (fl1->iif ^ fl2->iif)) == 0;
276} 280}
277 281
278static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp) 282static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
279{ 283{
280 struct dn_route *rth, **rthp; 284 struct dn_route *rth;
285 struct dn_route __rcu **rthp;
281 unsigned long now = jiffies; 286 unsigned long now = jiffies;
282 287
283 rthp = &dn_rt_hash_table[hash].chain; 288 rthp = &dn_rt_hash_table[hash].chain;
284 289
285 spin_lock_bh(&dn_rt_hash_table[hash].lock); 290 spin_lock_bh(&dn_rt_hash_table[hash].lock);
286 while((rth = *rthp) != NULL) { 291 while ((rth = rcu_dereference_protected(*rthp,
292 lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
287 if (compare_keys(&rth->fl, &rt->fl)) { 293 if (compare_keys(&rth->fl, &rt->fl)) {
288 /* Put it first */ 294 /* Put it first */
289 *rthp = rth->dst.dn_next; 295 *rthp = rth->dst.dn_next;
@@ -315,15 +321,15 @@ static void dn_run_flush(unsigned long dummy)
315 int i; 321 int i;
316 struct dn_route *rt, *next; 322 struct dn_route *rt, *next;
317 323
318 for(i = 0; i < dn_rt_hash_mask; i++) { 324 for (i = 0; i < dn_rt_hash_mask; i++) {
319 spin_lock_bh(&dn_rt_hash_table[i].lock); 325 spin_lock_bh(&dn_rt_hash_table[i].lock);
320 326
321 if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL) 327 if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
322 goto nothing_to_declare; 328 goto nothing_to_declare;
323 329
324 for(; rt; rt=next) { 330 for(; rt; rt = next) {
325 next = rt->dst.dn_next; 331 next = rcu_dereference_raw(rt->dst.dn_next);
326 rt->dst.dn_next = NULL; 332 RCU_INIT_POINTER(rt->dst.dn_next, NULL);
327 dst_free((struct dst_entry *)rt); 333 dst_free((struct dst_entry *)rt);
328 } 334 }
329 335
@@ -458,15 +464,16 @@ static int dn_return_long(struct sk_buff *skb)
458 */ 464 */
459static int dn_route_rx_packet(struct sk_buff *skb) 465static int dn_route_rx_packet(struct sk_buff *skb)
460{ 466{
461 struct dn_skb_cb *cb = DN_SKB_CB(skb); 467 struct dn_skb_cb *cb;
462 int err; 468 int err;
463 469
464 if ((err = dn_route_input(skb)) == 0) 470 if ((err = dn_route_input(skb)) == 0)
465 return dst_input(skb); 471 return dst_input(skb);
466 472
473 cb = DN_SKB_CB(skb);
467 if (decnet_debug_level & 4) { 474 if (decnet_debug_level & 4) {
468 char *devname = skb->dev ? skb->dev->name : "???"; 475 char *devname = skb->dev ? skb->dev->name : "???";
469 struct dn_skb_cb *cb = DN_SKB_CB(skb); 476
470 printk(KERN_DEBUG 477 printk(KERN_DEBUG
471 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n", 478 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
472 (int)cb->rt_flags, devname, skb->len, 479 (int)cb->rt_flags, devname, skb->len,
@@ -573,7 +580,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
573 struct dn_skb_cb *cb; 580 struct dn_skb_cb *cb;
574 unsigned char flags = 0; 581 unsigned char flags = 0;
575 __u16 len = le16_to_cpu(*(__le16 *)skb->data); 582 __u16 len = le16_to_cpu(*(__le16 *)skb->data);
576 struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr; 583 struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
577 unsigned char padlen = 0; 584 unsigned char padlen = 0;
578 585
579 if (!net_eq(dev_net(dev), &init_net)) 586 if (!net_eq(dev_net(dev), &init_net))
@@ -728,7 +735,7 @@ static int dn_forward(struct sk_buff *skb)
728{ 735{
729 struct dn_skb_cb *cb = DN_SKB_CB(skb); 736 struct dn_skb_cb *cb = DN_SKB_CB(skb);
730 struct dst_entry *dst = skb_dst(skb); 737 struct dst_entry *dst = skb_dst(skb);
731 struct dn_dev *dn_db = dst->dev->dn_ptr; 738 struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
732 struct dn_route *rt; 739 struct dn_route *rt;
733 struct neighbour *neigh = dst->neighbour; 740 struct neighbour *neigh = dst->neighbour;
734 int header_len; 741 int header_len;
@@ -835,13 +842,16 @@ static inline int dn_match_addr(__le16 addr1, __le16 addr2)
835static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope) 842static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
836{ 843{
837 __le16 saddr = 0; 844 __le16 saddr = 0;
838 struct dn_dev *dn_db = dev->dn_ptr; 845 struct dn_dev *dn_db;
839 struct dn_ifaddr *ifa; 846 struct dn_ifaddr *ifa;
840 int best_match = 0; 847 int best_match = 0;
841 int ret; 848 int ret;
842 849
843 read_lock(&dev_base_lock); 850 rcu_read_lock();
844 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { 851 dn_db = rcu_dereference(dev->dn_ptr);
852 for (ifa = rcu_dereference(dn_db->ifa_list);
853 ifa != NULL;
854 ifa = rcu_dereference(ifa->ifa_next)) {
845 if (ifa->ifa_scope > scope) 855 if (ifa->ifa_scope > scope)
846 continue; 856 continue;
847 if (!daddr) { 857 if (!daddr) {
@@ -854,7 +864,7 @@ static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int
854 if (best_match == 0) 864 if (best_match == 0)
855 saddr = ifa->ifa_local; 865 saddr = ifa->ifa_local;
856 } 866 }
857 read_unlock(&dev_base_lock); 867 rcu_read_unlock();
858 868
859 return saddr; 869 return saddr;
860} 870}
@@ -872,11 +882,9 @@ static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_re
872 882
873static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard) 883static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
874{ 884{
875 struct flowi fl = { .nl_u = { .dn_u = 885 struct flowi fl = { .fld_dst = oldflp->fld_dst,
876 { .daddr = oldflp->fld_dst, 886 .fld_src = oldflp->fld_src,
877 .saddr = oldflp->fld_src, 887 .fld_scope = RT_SCOPE_UNIVERSE,
878 .scope = RT_SCOPE_UNIVERSE,
879 } },
880 .mark = oldflp->mark, 888 .mark = oldflp->mark,
881 .iif = init_net.loopback_dev->ifindex, 889 .iif = init_net.loopback_dev->ifindex,
882 .oif = oldflp->oif }; 890 .oif = oldflp->oif };
@@ -1020,7 +1028,7 @@ source_ok:
1020 err = -ENODEV; 1028 err = -ENODEV;
1021 if (dev_out == NULL) 1029 if (dev_out == NULL)
1022 goto out; 1030 goto out;
1023 dn_db = dev_out->dn_ptr; 1031 dn_db = rcu_dereference_raw(dev_out->dn_ptr);
1024 /* Possible improvement - check all devices for local addr */ 1032 /* Possible improvement - check all devices for local addr */
1025 if (dn_dev_islocal(dev_out, fl.fld_dst)) { 1033 if (dn_dev_islocal(dev_out, fl.fld_dst)) {
1026 dev_put(dev_out); 1034 dev_put(dev_out);
@@ -1171,7 +1179,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
1171 if ((flp->fld_dst == rt->fl.fld_dst) && 1179 if ((flp->fld_dst == rt->fl.fld_dst) &&
1172 (flp->fld_src == rt->fl.fld_src) && 1180 (flp->fld_src == rt->fl.fld_src) &&
1173 (flp->mark == rt->fl.mark) && 1181 (flp->mark == rt->fl.mark) &&
1174 (rt->fl.iif == 0) && 1182 dn_is_output_route(rt) &&
1175 (rt->fl.oif == flp->oif)) { 1183 (rt->fl.oif == flp->oif)) {
1176 dst_use(&rt->dst, jiffies); 1184 dst_use(&rt->dst, jiffies);
1177 rcu_read_unlock_bh(); 1185 rcu_read_unlock_bh();
@@ -1220,11 +1228,9 @@ static int dn_route_input_slow(struct sk_buff *skb)
1220 int flags = 0; 1228 int flags = 0;
1221 __le16 gateway = 0; 1229 __le16 gateway = 0;
1222 __le16 local_src = 0; 1230 __le16 local_src = 0;
1223 struct flowi fl = { .nl_u = { .dn_u = 1231 struct flowi fl = { .fld_dst = cb->dst,
1224 { .daddr = cb->dst, 1232 .fld_src = cb->src,
1225 .saddr = cb->src, 1233 .fld_scope = RT_SCOPE_UNIVERSE,
1226 .scope = RT_SCOPE_UNIVERSE,
1227 } },
1228 .mark = skb->mark, 1234 .mark = skb->mark,
1229 .iif = skb->dev->ifindex }; 1235 .iif = skb->dev->ifindex };
1230 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE }; 1236 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
@@ -1233,7 +1239,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
1233 1239
1234 dev_hold(in_dev); 1240 dev_hold(in_dev);
1235 1241
1236 if ((dn_db = in_dev->dn_ptr) == NULL) 1242 if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
1237 goto out; 1243 goto out;
1238 1244
1239 /* Zero source addresses are not allowed */ 1245 /* Zero source addresses are not allowed */
@@ -1502,7 +1508,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1502 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires, 1508 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
1503 rt->dst.error) < 0) 1509 rt->dst.error) < 0)
1504 goto rtattr_failure; 1510 goto rtattr_failure;
1505 if (rt->fl.iif) 1511 if (dn_is_input_route(rt))
1506 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif); 1512 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
1507 1513
1508 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1514 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -1677,15 +1683,15 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
1677{ 1683{
1678 struct dn_rt_cache_iter_state *s = seq->private; 1684 struct dn_rt_cache_iter_state *s = seq->private;
1679 1685
1680 rt = rt->dst.dn_next; 1686 rt = rcu_dereference_bh(rt->dst.dn_next);
1681 while(!rt) { 1687 while (!rt) {
1682 rcu_read_unlock_bh(); 1688 rcu_read_unlock_bh();
1683 if (--s->bucket < 0) 1689 if (--s->bucket < 0)
1684 break; 1690 break;
1685 rcu_read_lock_bh(); 1691 rcu_read_lock_bh();
1686 rt = dn_rt_hash_table[s->bucket].chain; 1692 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1687 } 1693 }
1688 return rcu_dereference_bh(rt); 1694 return rt;
1689} 1695}
1690 1696
1691static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos) 1697static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
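
The dn_route.c hunks above convert the DECnet route cache to RCU: readers walk the hash chains under rcu_read_lock_bh(), while writers still take the per-bucket spinlock and use rcu_dereference_protected(..., lockdep_is_held(...)) to document that. A minimal sketch of the writer-side pattern, with illustrative names (struct node, struct bucket, promote) and the __rcu sparse annotations elided; it is not the kernel's code:

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct node {
	struct node *next;	/* __rcu in real code; annotations elided */
	unsigned int key;
};

struct bucket {
	spinlock_t lock;
	struct node *chain;	/* __rcu in real code */
};

/* Writer side: move the entry matching @key to the front of the chain.
 * The per-bucket lock serialises writers, so rcu_dereference_protected()
 * (checked by lockdep) is the right accessor here, while lockless readers
 * keep walking the chain under rcu_read_lock(). */
static struct node *promote(struct bucket *b, unsigned int key)
{
	struct node **np, *n;

	spin_lock_bh(&b->lock);
	np = &b->chain;
	while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&b->lock))) != NULL) {
		if (n->key != key) {
			np = &n->next;
			continue;
		}
		*np = n->next;			 /* unlink (writers only) */
		rcu_assign_pointer(n->next, b->chain);
		rcu_assign_pointer(b->chain, n); /* publish new head */
		break;
	}
	spin_unlock_bh(&b->lock);
	return n;
}
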
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 48fdf10be7a..6eb91df3c55 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -175,7 +175,7 @@ static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
175 175
176unsigned dnet_addr_type(__le16 addr) 176unsigned dnet_addr_type(__le16 addr)
177{ 177{
178 struct flowi fl = { .nl_u = { .dn_u = { .daddr = addr } } }; 178 struct flowi fl = { .fld_dst = addr };
179 struct dn_fib_res res; 179 struct dn_fib_res res;
180 unsigned ret = RTN_UNICAST; 180 unsigned ret = RTN_UNICAST;
181 struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0); 181 struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
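
Throughout this series, nested flowi initializers of the form .nl_u = { .dn_u/.ip4_u = { .daddr = ... } } are replaced with flat names such as .fld_dst and .fl4_dst. The usual way to get that without changing the structure layout is to define the flat names as macros that expand to the nested member path. The standalone demo below shows the idea with purely illustrative names (struct flow, flat_dst), not the kernel's flow.h definitions:

#include <stdio.h>

struct flow {
	int oif;
	union {
		struct { unsigned int daddr, saddr; unsigned char tos; } ip4;
	} u;
};

/* Flat aliases onto the nested members */
#define flat_dst u.ip4.daddr
#define flat_src u.ip4.saddr
#define flat_tos u.ip4.tos

int main(void)
{
	/* Before: nested designated initializer */
	struct flow a = { .u = { .ip4 = { .daddr = 1, .saddr = 2, .tos = 3 } } };
	/* After: same layout and object code, flatter source */
	struct flow b = { .flat_dst = 1, .flat_src = 2, .flat_tos = 3 };

	printf("%u %u\n", a.u.ip4.daddr, b.flat_dst);
	return 0;
}

Because the macro expands to a designator path, both forms initialize exactly the same union member; only the call-site syntax changes.
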
diff --git a/net/dns_resolver/Makefile b/net/dns_resolver/Makefile
index c0ef4e71dc4..d5c13c2eb36 100644
--- a/net/dns_resolver/Makefile
+++ b/net/dns_resolver/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_DNS_RESOLVER) += dns_resolver.o 5obj-$(CONFIG_DNS_RESOLVER) += dns_resolver.o
6 6
7dns_resolver-objs := dns_key.o dns_query.o 7dns_resolver-y := dns_key.o dns_query.o
diff --git a/net/econet/Makefile b/net/econet/Makefile
index 39f0a77abdb..05fae8be2fe 100644
--- a/net/econet/Makefile
+++ b/net/econet/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_ECONET) += econet.o 5obj-$(CONFIG_ECONET) += econet.o
6 6
7econet-objs := af_econet.o 7econet-y := af_econet.o
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 93c91b633a5..6df6ecf4970 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -52,11 +52,11 @@ struct net_device *ieee802154_get_dev(struct net *net,
52 52
53 switch (addr->addr_type) { 53 switch (addr->addr_type) {
54 case IEEE802154_ADDR_LONG: 54 case IEEE802154_ADDR_LONG:
55 rtnl_lock(); 55 rcu_read_lock();
56 dev = dev_getbyhwaddr(net, ARPHRD_IEEE802154, addr->hwaddr); 56 dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, addr->hwaddr);
57 if (dev) 57 if (dev)
58 dev_hold(dev); 58 dev_hold(dev);
59 rtnl_unlock(); 59 rcu_read_unlock();
60 break; 60 break;
61 case IEEE802154_ADDR_SHORT: 61 case IEEE802154_ADDR_SHORT:
62 if (addr->pan_id == 0xffff || 62 if (addr->pan_id == 0xffff ||
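
The hunk above drops the RTNL lock in favour of rcu_read_lock() plus dev_getbyhwaddr_rcu(). The important detail is that dev_hold() happens before rcu_read_unlock(): RCU only guarantees the device is not freed while the read-side section runs, so the reference must be pinned inside it. A compact restatement of that pattern; the wrapper name (get_dev_by_hwaddr) is illustrative:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Look up a device by hardware address without taking RTNL.
 * dev_hold() must be taken before leaving the RCU read-side section;
 * afterwards the caller owns a reference and drops it with dev_put(). */
static struct net_device *get_dev_by_hwaddr(struct net *net,
					    unsigned short type,
					    const char *hwaddr)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(net, type, hwaddr);
	if (dev)
		dev_hold(dev);		/* pin it while still under RCU */
	rcu_read_unlock();

	return dev;			/* caller does dev_put() when done */
}
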
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f581f77d109..f2b61107df6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1148,21 +1148,13 @@ int inet_sk_rebuild_header(struct sock *sk)
1148 struct flowi fl = { 1148 struct flowi fl = {
1149 .oif = sk->sk_bound_dev_if, 1149 .oif = sk->sk_bound_dev_if,
1150 .mark = sk->sk_mark, 1150 .mark = sk->sk_mark,
1151 .nl_u = { 1151 .fl4_dst = daddr,
1152 .ip4_u = { 1152 .fl4_src = inet->inet_saddr,
1153 .daddr = daddr, 1153 .fl4_tos = RT_CONN_FLAGS(sk),
1154 .saddr = inet->inet_saddr,
1155 .tos = RT_CONN_FLAGS(sk),
1156 },
1157 },
1158 .proto = sk->sk_protocol, 1154 .proto = sk->sk_protocol,
1159 .flags = inet_sk_flowi_flags(sk), 1155 .flags = inet_sk_flowi_flags(sk),
1160 .uli_u = { 1156 .fl_ip_sport = inet->inet_sport,
1161 .ports = { 1157 .fl_ip_dport = inet->inet_dport,
1162 .sport = inet->inet_sport,
1163 .dport = inet->inet_dport,
1164 },
1165 },
1166 }; 1158 };
1167 1159
1168 security_sk_classify_flow(sk, &fl); 1160 security_sk_classify_flow(sk, &fl);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index d8e540c5b07..a2fc7b961db 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -433,8 +433,8 @@ static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
433 433
434static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev) 434static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
435{ 435{
436 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip, 436 struct flowi fl = { .fl4_dst = sip,
437 .saddr = tip } } }; 437 .fl4_src = tip };
438 struct rtable *rt; 438 struct rtable *rt;
439 int flag = 0; 439 int flag = 0;
440 /*unsigned long now; */ 440 /*unsigned long now; */
@@ -883,7 +883,7 @@ static int arp_process(struct sk_buff *skb)
883 883
884 dont_send = arp_ignore(in_dev, sip, tip); 884 dont_send = arp_ignore(in_dev, sip, tip);
885 if (!dont_send && IN_DEV_ARPFILTER(in_dev)) 885 if (!dont_send && IN_DEV_ARPFILTER(in_dev))
886 dont_send |= arp_filter(sip, tip, dev); 886 dont_send = arp_filter(sip, tip, dev);
887 if (!dont_send) { 887 if (!dont_send) {
888 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 888 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
889 if (n) { 889 if (n) {
@@ -1017,13 +1017,14 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
1017 IPV4_DEVCONF_ALL(net, PROXY_ARP) = on; 1017 IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
1018 return 0; 1018 return 0;
1019 } 1019 }
1020 if (__in_dev_get_rtnl(dev)) { 1020 if (__in_dev_get_rcu(dev)) {
1021 IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on); 1021 IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
1022 return 0; 1022 return 0;
1023 } 1023 }
1024 return -ENXIO; 1024 return -ENXIO;
1025} 1025}
1026 1026
1027/* must be called with rcu_read_lock() */
1027static int arp_req_set_public(struct net *net, struct arpreq *r, 1028static int arp_req_set_public(struct net *net, struct arpreq *r,
1028 struct net_device *dev) 1029 struct net_device *dev)
1029{ 1030{
@@ -1033,7 +1034,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
1033 if (mask && mask != htonl(0xFFFFFFFF)) 1034 if (mask && mask != htonl(0xFFFFFFFF))
1034 return -EINVAL; 1035 return -EINVAL;
1035 if (!dev && (r->arp_flags & ATF_COM)) { 1036 if (!dev && (r->arp_flags & ATF_COM)) {
1036 dev = dev_getbyhwaddr(net, r->arp_ha.sa_family, 1037 dev = dev_getbyhwaddr_rcu(net, r->arp_ha.sa_family,
1037 r->arp_ha.sa_data); 1038 r->arp_ha.sa_data);
1038 if (!dev) 1039 if (!dev)
1039 return -ENODEV; 1040 return -ENODEV;
@@ -1061,8 +1062,8 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1061 if (r->arp_flags & ATF_PERM) 1062 if (r->arp_flags & ATF_PERM)
1062 r->arp_flags |= ATF_COM; 1063 r->arp_flags |= ATF_COM;
1063 if (dev == NULL) { 1064 if (dev == NULL) {
1064 struct flowi fl = { .nl_u.ip4_u = { .daddr = ip, 1065 struct flowi fl = { .fl4_dst = ip,
1065 .tos = RTO_ONLINK } }; 1066 .fl4_tos = RTO_ONLINK };
1066 struct rtable *rt; 1067 struct rtable *rt;
1067 err = ip_route_output_key(net, &rt, &fl); 1068 err = ip_route_output_key(net, &rt, &fl);
1068 if (err != 0) 1069 if (err != 0)
@@ -1169,8 +1170,8 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1169 1170
1170 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr; 1171 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
1171 if (dev == NULL) { 1172 if (dev == NULL) {
1172 struct flowi fl = { .nl_u.ip4_u = { .daddr = ip, 1173 struct flowi fl = { .fl4_dst = ip,
1173 .tos = RTO_ONLINK } }; 1174 .fl4_tos = RTO_ONLINK };
1174 struct rtable *rt; 1175 struct rtable *rt;
1175 err = ip_route_output_key(net, &rt, &fl); 1176 err = ip_route_output_key(net, &rt, &fl);
1176 if (err != 0) 1177 if (err != 0)
@@ -1225,10 +1226,10 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1225 if (!(r.arp_flags & ATF_NETMASK)) 1226 if (!(r.arp_flags & ATF_NETMASK))
1226 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr = 1227 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
1227 htonl(0xFFFFFFFFUL); 1228 htonl(0xFFFFFFFFUL);
1228 rtnl_lock(); 1229 rcu_read_lock();
1229 if (r.arp_dev[0]) { 1230 if (r.arp_dev[0]) {
1230 err = -ENODEV; 1231 err = -ENODEV;
1231 dev = __dev_get_by_name(net, r.arp_dev); 1232 dev = dev_get_by_name_rcu(net, r.arp_dev);
1232 if (dev == NULL) 1233 if (dev == NULL)
1233 goto out; 1234 goto out;
1234 1235
@@ -1252,12 +1253,12 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1252 break; 1253 break;
1253 case SIOCGARP: 1254 case SIOCGARP:
1254 err = arp_req_get(&r, dev); 1255 err = arp_req_get(&r, dev);
1255 if (!err && copy_to_user(arg, &r, sizeof(r)))
1256 err = -EFAULT;
1257 break; 1256 break;
1258 } 1257 }
1259out: 1258out:
1260 rtnl_unlock(); 1259 rcu_read_unlock();
1260 if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
1261 err = -EFAULT;
1261 return err; 1262 return err;
1262} 1263}
1263 1264
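
In the arp_ioctl() hunks above, rtnl_lock() becomes rcu_read_lock() and, notably, the SIOCGARP copy_to_user() moves outside the locked region. copy_to_user() can fault and therefore sleep, which is not permitted inside an RCU read-side critical section, so the result is staged in a kernel-side struct arpreq and copied out only after rcu_read_unlock(). A sketch of that shape; example_get_ioctl is an illustrative name and arp_req_get() stands in for the non-sleeping work done under the lock:

#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/uaccess.h>

static int example_get_ioctl(struct net *net, void __user *arg)
{
	struct arpreq r;
	struct net_device *dev;
	int err = -ENODEV;

	if (copy_from_user(&r, arg, sizeof(r)))
		return -EFAULT;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, r.arp_dev);
	if (dev)
		err = arp_req_get(&r, dev);	/* fills r, no sleeping */
	rcu_read_unlock();

	/* copy_to_user() may fault and sleep: do it outside the RCU section */
	if (!err && copy_to_user(arg, &r, sizeof(r)))
		err = -EFAULT;
	return err;
}
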
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index dc94b0316b7..3b067704ab3 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1256,6 +1256,87 @@ errout:
1256 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err); 1256 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
1257} 1257}
1258 1258
1259static size_t inet_get_link_af_size(const struct net_device *dev)
1260{
1261 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1262
1263 if (!in_dev)
1264 return 0;
1265
1266 return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
1267}
1268
1269static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
1270{
1271 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1272 struct nlattr *nla;
1273 int i;
1274
1275 if (!in_dev)
1276 return -ENODATA;
1277
1278 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1279 if (nla == NULL)
1280 return -EMSGSIZE;
1281
1282 for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1283 ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
1284
1285 return 0;
1286}
1287
1288static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1289 [IFLA_INET_CONF] = { .type = NLA_NESTED },
1290};
1291
1292static int inet_validate_link_af(const struct net_device *dev,
1293 const struct nlattr *nla)
1294{
1295 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1296 int err, rem;
1297
1298 if (dev && !__in_dev_get_rtnl(dev))
1299 return -EAFNOSUPPORT;
1300
1301 err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
1302 if (err < 0)
1303 return err;
1304
1305 if (tb[IFLA_INET_CONF]) {
1306 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1307 int cfgid = nla_type(a);
1308
1309 if (nla_len(a) < 4)
1310 return -EINVAL;
1311
1312 if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
1313 return -EINVAL;
1314 }
1315 }
1316
1317 return 0;
1318}
1319
1320static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1321{
1322 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1323 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1324 int rem;
1325
1326 if (!in_dev)
1327 return -EAFNOSUPPORT;
1328
1329 if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
1330 BUG();
1331
1332 if (tb[IFLA_INET_CONF]) {
1333 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1334 ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
1335 }
1336
1337 return 0;
1338}
1339
1259#ifdef CONFIG_SYSCTL 1340#ifdef CONFIG_SYSCTL
1260 1341
1261static void devinet_copy_dflt_conf(struct net *net, int i) 1342static void devinet_copy_dflt_conf(struct net *net, int i)
@@ -1619,6 +1700,14 @@ static __net_initdata struct pernet_operations devinet_ops = {
1619 .exit = devinet_exit_net, 1700 .exit = devinet_exit_net,
1620}; 1701};
1621 1702
1703static struct rtnl_af_ops inet_af_ops = {
1704 .family = AF_INET,
1705 .fill_link_af = inet_fill_link_af,
1706 .get_link_af_size = inet_get_link_af_size,
1707 .validate_link_af = inet_validate_link_af,
1708 .set_link_af = inet_set_link_af,
1709};
1710
1622void __init devinet_init(void) 1711void __init devinet_init(void)
1623{ 1712{
1624 register_pernet_subsys(&devinet_ops); 1713 register_pernet_subsys(&devinet_ops);
@@ -1626,6 +1715,8 @@ void __init devinet_init(void)
1626 register_gifconf(PF_INET, inet_gifconf); 1715 register_gifconf(PF_INET, inet_gifconf);
1627 register_netdevice_notifier(&ip_netdev_notifier); 1716 register_netdevice_notifier(&ip_netdev_notifier);
1628 1717
1718 rtnl_af_register(&inet_af_ops);
1719
1629 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL); 1720 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL);
1630 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL); 1721 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL);
1631 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr); 1722 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index eb6f69a8f27..d3a1112b9d9 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -158,11 +158,7 @@ static void fib_flush(struct net *net)
158struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref) 158struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
159{ 159{
160 struct flowi fl = { 160 struct flowi fl = {
161 .nl_u = { 161 .fl4_dst = addr,
162 .ip4_u = {
163 .daddr = addr
164 }
165 },
166 .flags = FLOWI_FLAG_MATCH_ANY_IIF 162 .flags = FLOWI_FLAG_MATCH_ANY_IIF
167 }; 163 };
168 struct fib_result res = { 0 }; 164 struct fib_result res = { 0 };
@@ -193,7 +189,7 @@ static inline unsigned __inet_dev_addr_type(struct net *net,
193 const struct net_device *dev, 189 const struct net_device *dev,
194 __be32 addr) 190 __be32 addr)
195{ 191{
196 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; 192 struct flowi fl = { .fl4_dst = addr };
197 struct fib_result res; 193 struct fib_result res;
198 unsigned ret = RTN_BROADCAST; 194 unsigned ret = RTN_BROADCAST;
199 struct fib_table *local_table; 195 struct fib_table *local_table;
@@ -247,13 +243,9 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
247{ 243{
248 struct in_device *in_dev; 244 struct in_device *in_dev;
249 struct flowi fl = { 245 struct flowi fl = {
250 .nl_u = { 246 .fl4_dst = src,
251 .ip4_u = { 247 .fl4_src = dst,
252 .daddr = src, 248 .fl4_tos = tos,
253 .saddr = dst,
254 .tos = tos
255 }
256 },
257 .mark = mark, 249 .mark = mark,
258 .iif = oif 250 .iif = oif
259 }; 251 };
@@ -853,13 +845,9 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
853 struct fib_result res; 845 struct fib_result res;
854 struct flowi fl = { 846 struct flowi fl = {
855 .mark = frn->fl_mark, 847 .mark = frn->fl_mark,
856 .nl_u = { 848 .fl4_dst = frn->fl_addr,
857 .ip4_u = { 849 .fl4_tos = frn->fl_tos,
858 .daddr = frn->fl_addr, 850 .fl4_scope = frn->fl_scope,
859 .tos = frn->fl_tos,
860 .scope = frn->fl_scope
861 }
862 }
863 }; 851 };
864 852
865#ifdef CONFIG_IP_MULTIPLE_TABLES 853#ifdef CONFIG_IP_MULTIPLE_TABLES
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3e0da3ef611..12d3dc3df1b 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -563,12 +563,8 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
563 rcu_read_lock(); 563 rcu_read_lock();
564 { 564 {
565 struct flowi fl = { 565 struct flowi fl = {
566 .nl_u = { 566 .fl4_dst = nh->nh_gw,
567 .ip4_u = { 567 .fl4_scope = cfg->fc_scope + 1,
568 .daddr = nh->nh_gw,
569 .scope = cfg->fc_scope + 1,
570 },
571 },
572 .oif = nh->nh_oif, 568 .oif = nh->nh_oif,
573 }; 569 };
574 570
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index e5d1a44bcbd..4aa1b7f01ea 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -386,10 +386,9 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
386 daddr = icmp_param->replyopts.faddr; 386 daddr = icmp_param->replyopts.faddr;
387 } 387 }
388 { 388 {
389 struct flowi fl = { .nl_u = { .ip4_u = 389 struct flowi fl = { .fl4_dst= daddr,
390 { .daddr = daddr, 390 .fl4_src = rt->rt_spec_dst,
391 .saddr = rt->rt_spec_dst, 391 .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
392 .tos = RT_TOS(ip_hdr(skb)->tos) } },
393 .proto = IPPROTO_ICMP }; 392 .proto = IPPROTO_ICMP };
394 security_skb_classify_flow(skb, &fl); 393 security_skb_classify_flow(skb, &fl);
395 if (ip_route_output_key(net, &rt, &fl)) 394 if (ip_route_output_key(net, &rt, &fl))
@@ -506,8 +505,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
506 struct net_device *dev = NULL; 505 struct net_device *dev = NULL;
507 506
508 rcu_read_lock(); 507 rcu_read_lock();
509 if (rt->fl.iif && 508 if (rt_is_input_route(rt) &&
510 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) 509 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
511 dev = dev_get_by_index_rcu(net, rt->fl.iif); 510 dev = dev_get_by_index_rcu(net, rt->fl.iif);
512 511
513 if (dev) 512 if (dev)
@@ -542,22 +541,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
542 541
543 { 542 {
544 struct flowi fl = { 543 struct flowi fl = {
545 .nl_u = { 544 .fl4_dst = icmp_param.replyopts.srr ?
546 .ip4_u = { 545 icmp_param.replyopts.faddr : iph->saddr,
547 .daddr = icmp_param.replyopts.srr ? 546 .fl4_src = saddr,
548 icmp_param.replyopts.faddr : 547 .fl4_tos = RT_TOS(tos),
549 iph->saddr,
550 .saddr = saddr,
551 .tos = RT_TOS(tos)
552 }
553 },
554 .proto = IPPROTO_ICMP, 548 .proto = IPPROTO_ICMP,
555 .uli_u = { 549 .fl_icmp_type = type,
556 .icmpt = { 550 .fl_icmp_code = code,
557 .type = type,
558 .code = code
559 }
560 }
561 }; 551 };
562 int err; 552 int err;
563 struct rtable *rt2; 553 struct rtable *rt2;
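
Several hunks in this series (icmp.c above, igmp.c and dn_route.c elsewhere) replace open-coded tests on fl.iif with helpers such as rt_is_input_route()/rt_is_output_route() and dn_is_input_route()/dn_is_output_route(). Their definitions are not part of this excerpt; presumably they just wrap the old test, roughly as below (struct rtable and its fl member come from the kernel's route headers):

/* Presumed shape only; the real helpers live in the route headers. */
static inline bool rt_is_input_route(struct rtable *rt)
{
	return rt->fl.iif != 0;		/* packet arrived on an interface */
}

static inline bool rt_is_output_route(struct rtable *rt)
{
	return rt->fl.iif == 0;		/* locally generated */
}
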
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 3c53c2d89e3..e0e77e297de 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -149,21 +149,37 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc);
149static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, 149static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
150 int sfcount, __be32 *psfsrc, int delta); 150 int sfcount, __be32 *psfsrc, int delta);
151 151
152
153static void ip_mc_list_reclaim(struct rcu_head *head)
154{
155 kfree(container_of(head, struct ip_mc_list, rcu));
156}
157
152static void ip_ma_put(struct ip_mc_list *im) 158static void ip_ma_put(struct ip_mc_list *im)
153{ 159{
154 if (atomic_dec_and_test(&im->refcnt)) { 160 if (atomic_dec_and_test(&im->refcnt)) {
155 in_dev_put(im->interface); 161 in_dev_put(im->interface);
156 kfree(im); 162 call_rcu(&im->rcu, ip_mc_list_reclaim);
157 } 163 }
158} 164}
159 165
166#define for_each_pmc_rcu(in_dev, pmc) \
167 for (pmc = rcu_dereference(in_dev->mc_list); \
168 pmc != NULL; \
169 pmc = rcu_dereference(pmc->next_rcu))
170
171#define for_each_pmc_rtnl(in_dev, pmc) \
172 for (pmc = rtnl_dereference(in_dev->mc_list); \
173 pmc != NULL; \
174 pmc = rtnl_dereference(pmc->next_rcu))
175
160#ifdef CONFIG_IP_MULTICAST 176#ifdef CONFIG_IP_MULTICAST
161 177
162/* 178/*
163 * Timer management 179 * Timer management
164 */ 180 */
165 181
166static __inline__ void igmp_stop_timer(struct ip_mc_list *im) 182static void igmp_stop_timer(struct ip_mc_list *im)
167{ 183{
168 spin_lock_bh(&im->lock); 184 spin_lock_bh(&im->lock);
169 if (del_timer(&im->timer)) 185 if (del_timer(&im->timer))
@@ -284,6 +300,8 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
284 return scount; 300 return scount;
285} 301}
286 302
303#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
304
287static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) 305static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
288{ 306{
289 struct sk_buff *skb; 307 struct sk_buff *skb;
@@ -292,14 +310,20 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
292 struct igmpv3_report *pig; 310 struct igmpv3_report *pig;
293 struct net *net = dev_net(dev); 311 struct net *net = dev_net(dev);
294 312
295 skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); 313 while (1) {
296 if (skb == NULL) 314 skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
297 return NULL; 315 GFP_ATOMIC | __GFP_NOWARN);
316 if (skb)
317 break;
318 size >>= 1;
319 if (size < 256)
320 return NULL;
321 }
322 igmp_skb_size(skb) = size;
298 323
299 { 324 {
300 struct flowi fl = { .oif = dev->ifindex, 325 struct flowi fl = { .oif = dev->ifindex,
301 .nl_u = { .ip4_u = { 326 .fl4_dst = IGMPV3_ALL_MCR,
302 .daddr = IGMPV3_ALL_MCR } },
303 .proto = IPPROTO_IGMP }; 327 .proto = IPPROTO_IGMP };
304 if (ip_route_output_key(net, &rt, &fl)) { 328 if (ip_route_output_key(net, &rt, &fl)) {
305 kfree_skb(skb); 329 kfree_skb(skb);
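
igmpv3_newpack() above now retries the skb allocation with progressively smaller sizes instead of failing outright, passes __GFP_NOWARN so the expected failures stay quiet, and records the size that finally succeeded in skb->cb so that AVAILABLE() later budgets against it rather than the device MTU. A userspace analogue of the backoff loop (the function name is illustrative; the halving and the idea of a floor mirror the hunk):

#include <stdlib.h>
#include <stddef.h>

/* Try a large buffer first, halve on failure, give up below a floor.
 * On success *size holds the size that was actually obtained, so the
 * caller can account against what it really got. */
static void *alloc_backoff(size_t *size, size_t floor)
{
	void *p;

	while (*size >= floor) {
		p = malloc(*size);
		if (p)
			return p;
		*size >>= 1;
	}
	return NULL;
}
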
@@ -384,7 +408,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
384 return skb; 408 return skb;
385} 409}
386 410
387#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \ 411#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
388 skb_tailroom(skb)) : 0) 412 skb_tailroom(skb)) : 0)
389 413
390static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, 414static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
@@ -502,8 +526,8 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
502 int type; 526 int type;
503 527
504 if (!pmc) { 528 if (!pmc) {
505 read_lock(&in_dev->mc_list_lock); 529 rcu_read_lock();
506 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 530 for_each_pmc_rcu(in_dev, pmc) {
507 if (pmc->multiaddr == IGMP_ALL_HOSTS) 531 if (pmc->multiaddr == IGMP_ALL_HOSTS)
508 continue; 532 continue;
509 spin_lock_bh(&pmc->lock); 533 spin_lock_bh(&pmc->lock);
@@ -514,7 +538,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
514 skb = add_grec(skb, pmc, type, 0, 0); 538 skb = add_grec(skb, pmc, type, 0, 0);
515 spin_unlock_bh(&pmc->lock); 539 spin_unlock_bh(&pmc->lock);
516 } 540 }
517 read_unlock(&in_dev->mc_list_lock); 541 rcu_read_unlock();
518 } else { 542 } else {
519 spin_lock_bh(&pmc->lock); 543 spin_lock_bh(&pmc->lock);
520 if (pmc->sfcount[MCAST_EXCLUDE]) 544 if (pmc->sfcount[MCAST_EXCLUDE])
@@ -556,7 +580,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
556 struct sk_buff *skb = NULL; 580 struct sk_buff *skb = NULL;
557 int type, dtype; 581 int type, dtype;
558 582
559 read_lock(&in_dev->mc_list_lock); 583 rcu_read_lock();
560 spin_lock_bh(&in_dev->mc_tomb_lock); 584 spin_lock_bh(&in_dev->mc_tomb_lock);
561 585
562 /* deleted MCA's */ 586 /* deleted MCA's */
@@ -593,7 +617,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
593 spin_unlock_bh(&in_dev->mc_tomb_lock); 617 spin_unlock_bh(&in_dev->mc_tomb_lock);
594 618
595 /* change recs */ 619 /* change recs */
596 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 620 for_each_pmc_rcu(in_dev, pmc) {
597 spin_lock_bh(&pmc->lock); 621 spin_lock_bh(&pmc->lock);
598 if (pmc->sfcount[MCAST_EXCLUDE]) { 622 if (pmc->sfcount[MCAST_EXCLUDE]) {
599 type = IGMPV3_BLOCK_OLD_SOURCES; 623 type = IGMPV3_BLOCK_OLD_SOURCES;
@@ -616,7 +640,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
616 } 640 }
617 spin_unlock_bh(&pmc->lock); 641 spin_unlock_bh(&pmc->lock);
618 } 642 }
619 read_unlock(&in_dev->mc_list_lock); 643 rcu_read_unlock();
620 644
621 if (!skb) 645 if (!skb)
622 return; 646 return;
@@ -644,7 +668,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
644 668
645 { 669 {
646 struct flowi fl = { .oif = dev->ifindex, 670 struct flowi fl = { .oif = dev->ifindex,
647 .nl_u = { .ip4_u = { .daddr = dst } }, 671 .fl4_dst = dst,
648 .proto = IPPROTO_IGMP }; 672 .proto = IPPROTO_IGMP };
649 if (ip_route_output_key(net, &rt, &fl)) 673 if (ip_route_output_key(net, &rt, &fl))
650 return -1; 674 return -1;
@@ -813,14 +837,14 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group)
813 if (group == IGMP_ALL_HOSTS) 837 if (group == IGMP_ALL_HOSTS)
814 return; 838 return;
815 839
816 read_lock(&in_dev->mc_list_lock); 840 rcu_read_lock();
817 for (im=in_dev->mc_list; im!=NULL; im=im->next) { 841 for_each_pmc_rcu(in_dev, im) {
818 if (im->multiaddr == group) { 842 if (im->multiaddr == group) {
819 igmp_stop_timer(im); 843 igmp_stop_timer(im);
820 break; 844 break;
821 } 845 }
822 } 846 }
823 read_unlock(&in_dev->mc_list_lock); 847 rcu_read_unlock();
824} 848}
825 849
826static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, 850static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
@@ -906,8 +930,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
906 * - Use the igmp->igmp_code field as the maximum 930 * - Use the igmp->igmp_code field as the maximum
907 * delay possible 931 * delay possible
908 */ 932 */
909 read_lock(&in_dev->mc_list_lock); 933 rcu_read_lock();
910 for (im=in_dev->mc_list; im!=NULL; im=im->next) { 934 for_each_pmc_rcu(in_dev, im) {
911 int changed; 935 int changed;
912 936
913 if (group && group != im->multiaddr) 937 if (group && group != im->multiaddr)
@@ -925,7 +949,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
925 if (changed) 949 if (changed)
926 igmp_mod_timer(im, max_delay); 950 igmp_mod_timer(im, max_delay);
927 } 951 }
928 read_unlock(&in_dev->mc_list_lock); 952 rcu_read_unlock();
929} 953}
930 954
931/* called in rcu_read_lock() section */ 955/* called in rcu_read_lock() section */
@@ -961,7 +985,7 @@ int igmp_rcv(struct sk_buff *skb)
961 case IGMP_HOST_MEMBERSHIP_REPORT: 985 case IGMP_HOST_MEMBERSHIP_REPORT:
962 case IGMPV2_HOST_MEMBERSHIP_REPORT: 986 case IGMPV2_HOST_MEMBERSHIP_REPORT:
963 /* Is it our report looped back? */ 987 /* Is it our report looped back? */
964 if (skb_rtable(skb)->fl.iif == 0) 988 if (rt_is_output_route(skb_rtable(skb)))
965 break; 989 break;
966 /* don't rely on MC router hearing unicast reports */ 990 /* don't rely on MC router hearing unicast reports */
967 if (skb->pkt_type == PACKET_MULTICAST || 991 if (skb->pkt_type == PACKET_MULTICAST ||
@@ -1110,8 +1134,8 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
1110 kfree(pmc); 1134 kfree(pmc);
1111 } 1135 }
1112 /* clear dead sources, too */ 1136 /* clear dead sources, too */
1113 read_lock(&in_dev->mc_list_lock); 1137 rcu_read_lock();
1114 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 1138 for_each_pmc_rcu(in_dev, pmc) {
1115 struct ip_sf_list *psf, *psf_next; 1139 struct ip_sf_list *psf, *psf_next;
1116 1140
1117 spin_lock_bh(&pmc->lock); 1141 spin_lock_bh(&pmc->lock);
@@ -1123,7 +1147,7 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
1123 kfree(psf); 1147 kfree(psf);
1124 } 1148 }
1125 } 1149 }
1126 read_unlock(&in_dev->mc_list_lock); 1150 rcu_read_unlock();
1127} 1151}
1128#endif 1152#endif
1129 1153
@@ -1209,7 +1233,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1209 1233
1210 ASSERT_RTNL(); 1234 ASSERT_RTNL();
1211 1235
1212 for (im=in_dev->mc_list; im; im=im->next) { 1236 for_each_pmc_rtnl(in_dev, im) {
1213 if (im->multiaddr == addr) { 1237 if (im->multiaddr == addr) {
1214 im->users++; 1238 im->users++;
1215 ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0); 1239 ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
@@ -1217,7 +1241,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1217 } 1241 }
1218 } 1242 }
1219 1243
1220 im = kmalloc(sizeof(*im), GFP_KERNEL); 1244 im = kzalloc(sizeof(*im), GFP_KERNEL);
1221 if (!im) 1245 if (!im)
1222 goto out; 1246 goto out;
1223 1247
@@ -1227,26 +1251,18 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1227 im->multiaddr = addr; 1251 im->multiaddr = addr;
1228 /* initial mode is (EX, empty) */ 1252 /* initial mode is (EX, empty) */
1229 im->sfmode = MCAST_EXCLUDE; 1253 im->sfmode = MCAST_EXCLUDE;
1230 im->sfcount[MCAST_INCLUDE] = 0;
1231 im->sfcount[MCAST_EXCLUDE] = 1; 1254 im->sfcount[MCAST_EXCLUDE] = 1;
1232 im->sources = NULL;
1233 im->tomb = NULL;
1234 im->crcount = 0;
1235 atomic_set(&im->refcnt, 1); 1255 atomic_set(&im->refcnt, 1);
1236 spin_lock_init(&im->lock); 1256 spin_lock_init(&im->lock);
1237#ifdef CONFIG_IP_MULTICAST 1257#ifdef CONFIG_IP_MULTICAST
1238 im->tm_running = 0;
1239 setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im); 1258 setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
1240 im->unsolicit_count = IGMP_Unsolicited_Report_Count; 1259 im->unsolicit_count = IGMP_Unsolicited_Report_Count;
1241 im->reporter = 0;
1242 im->gsquery = 0;
1243#endif 1260#endif
1244 im->loaded = 0; 1261
1245 write_lock_bh(&in_dev->mc_list_lock); 1262 im->next_rcu = in_dev->mc_list;
1246 im->next = in_dev->mc_list;
1247 in_dev->mc_list = im;
1248 in_dev->mc_count++; 1263 in_dev->mc_count++;
1249 write_unlock_bh(&in_dev->mc_list_lock); 1264 rcu_assign_pointer(in_dev->mc_list, im);
1265
1250#ifdef CONFIG_IP_MULTICAST 1266#ifdef CONFIG_IP_MULTICAST
1251 igmpv3_del_delrec(in_dev, im->multiaddr); 1267 igmpv3_del_delrec(in_dev, im->multiaddr);
1252#endif 1268#endif
@@ -1260,26 +1276,32 @@ EXPORT_SYMBOL(ip_mc_inc_group);
1260 1276
1261/* 1277/*
1262 * Resend IGMP JOIN report; used for bonding. 1278 * Resend IGMP JOIN report; used for bonding.
1279 * Called with rcu_read_lock()
1263 */ 1280 */
1264void ip_mc_rejoin_group(struct ip_mc_list *im) 1281void ip_mc_rejoin_groups(struct in_device *in_dev)
1265{ 1282{
1266#ifdef CONFIG_IP_MULTICAST 1283#ifdef CONFIG_IP_MULTICAST
1267 struct in_device *in_dev = im->interface; 1284 struct ip_mc_list *im;
1285 int type;
1268 1286
1269 if (im->multiaddr == IGMP_ALL_HOSTS) 1287 for_each_pmc_rcu(in_dev, im) {
1270 return; 1288 if (im->multiaddr == IGMP_ALL_HOSTS)
1289 continue;
1271 1290
1272 /* a failover is happening and switches 1291 /* a failover is happening and switches
1273 * must be notified immediately */ 1292 * must be notified immediately
1274 if (IGMP_V1_SEEN(in_dev)) 1293 */
1275 igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT); 1294 if (IGMP_V1_SEEN(in_dev))
1276 else if (IGMP_V2_SEEN(in_dev)) 1295 type = IGMP_HOST_MEMBERSHIP_REPORT;
1277 igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT); 1296 else if (IGMP_V2_SEEN(in_dev))
1278 else 1297 type = IGMPV2_HOST_MEMBERSHIP_REPORT;
1279 igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT); 1298 else
1299 type = IGMPV3_HOST_MEMBERSHIP_REPORT;
1300 igmp_send_report(in_dev, im, type);
1301 }
1280#endif 1302#endif
1281} 1303}
1282EXPORT_SYMBOL(ip_mc_rejoin_group); 1304EXPORT_SYMBOL(ip_mc_rejoin_groups);
1283 1305
1284/* 1306/*
1285 * A socket has left a multicast group on device dev 1307 * A socket has left a multicast group on device dev
@@ -1287,17 +1309,18 @@ EXPORT_SYMBOL(ip_mc_rejoin_group);
1287 1309
1288void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) 1310void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
1289{ 1311{
1290 struct ip_mc_list *i, **ip; 1312 struct ip_mc_list *i;
1313 struct ip_mc_list __rcu **ip;
1291 1314
1292 ASSERT_RTNL(); 1315 ASSERT_RTNL();
1293 1316
1294 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { 1317 for (ip = &in_dev->mc_list;
1318 (i = rtnl_dereference(*ip)) != NULL;
1319 ip = &i->next_rcu) {
1295 if (i->multiaddr == addr) { 1320 if (i->multiaddr == addr) {
1296 if (--i->users == 0) { 1321 if (--i->users == 0) {
1297 write_lock_bh(&in_dev->mc_list_lock); 1322 *ip = i->next_rcu;
1298 *ip = i->next;
1299 in_dev->mc_count--; 1323 in_dev->mc_count--;
1300 write_unlock_bh(&in_dev->mc_list_lock);
1301 igmp_group_dropped(i); 1324 igmp_group_dropped(i);
1302 1325
1303 if (!in_dev->dead) 1326 if (!in_dev->dead)
@@ -1316,34 +1339,34 @@ EXPORT_SYMBOL(ip_mc_dec_group);
1316 1339
1317void ip_mc_unmap(struct in_device *in_dev) 1340void ip_mc_unmap(struct in_device *in_dev)
1318{ 1341{
1319 struct ip_mc_list *i; 1342 struct ip_mc_list *pmc;
1320 1343
1321 ASSERT_RTNL(); 1344 ASSERT_RTNL();
1322 1345
1323 for (i = in_dev->mc_list; i; i = i->next) 1346 for_each_pmc_rtnl(in_dev, pmc)
1324 igmp_group_dropped(i); 1347 igmp_group_dropped(pmc);
1325} 1348}
1326 1349
1327void ip_mc_remap(struct in_device *in_dev) 1350void ip_mc_remap(struct in_device *in_dev)
1328{ 1351{
1329 struct ip_mc_list *i; 1352 struct ip_mc_list *pmc;
1330 1353
1331 ASSERT_RTNL(); 1354 ASSERT_RTNL();
1332 1355
1333 for (i = in_dev->mc_list; i; i = i->next) 1356 for_each_pmc_rtnl(in_dev, pmc)
1334 igmp_group_added(i); 1357 igmp_group_added(pmc);
1335} 1358}
1336 1359
1337/* Device going down */ 1360/* Device going down */
1338 1361
1339void ip_mc_down(struct in_device *in_dev) 1362void ip_mc_down(struct in_device *in_dev)
1340{ 1363{
1341 struct ip_mc_list *i; 1364 struct ip_mc_list *pmc;
1342 1365
1343 ASSERT_RTNL(); 1366 ASSERT_RTNL();
1344 1367
1345 for (i=in_dev->mc_list; i; i=i->next) 1368 for_each_pmc_rtnl(in_dev, pmc)
1346 igmp_group_dropped(i); 1369 igmp_group_dropped(pmc);
1347 1370
1348#ifdef CONFIG_IP_MULTICAST 1371#ifdef CONFIG_IP_MULTICAST
1349 in_dev->mr_ifc_count = 0; 1372 in_dev->mr_ifc_count = 0;
@@ -1374,7 +1397,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
1374 in_dev->mr_qrv = IGMP_Unsolicited_Report_Count; 1397 in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
1375#endif 1398#endif
1376 1399
1377 rwlock_init(&in_dev->mc_list_lock);
1378 spin_lock_init(&in_dev->mc_tomb_lock); 1400 spin_lock_init(&in_dev->mc_tomb_lock);
1379} 1401}
1380 1402
@@ -1382,14 +1404,14 @@ void ip_mc_init_dev(struct in_device *in_dev)
1382 1404
1383void ip_mc_up(struct in_device *in_dev) 1405void ip_mc_up(struct in_device *in_dev)
1384{ 1406{
1385 struct ip_mc_list *i; 1407 struct ip_mc_list *pmc;
1386 1408
1387 ASSERT_RTNL(); 1409 ASSERT_RTNL();
1388 1410
1389 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); 1411 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
1390 1412
1391 for (i=in_dev->mc_list; i; i=i->next) 1413 for_each_pmc_rtnl(in_dev, pmc)
1392 igmp_group_added(i); 1414 igmp_group_added(pmc);
1393} 1415}
1394 1416
1395/* 1417/*
@@ -1405,24 +1427,19 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1405 /* Deactivate timers */ 1427 /* Deactivate timers */
1406 ip_mc_down(in_dev); 1428 ip_mc_down(in_dev);
1407 1429
1408 write_lock_bh(&in_dev->mc_list_lock); 1430 while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
1409 while ((i = in_dev->mc_list) != NULL) { 1431 in_dev->mc_list = i->next_rcu;
1410 in_dev->mc_list = i->next;
1411 in_dev->mc_count--; 1432 in_dev->mc_count--;
1412 write_unlock_bh(&in_dev->mc_list_lock); 1433
1413 igmp_group_dropped(i); 1434 igmp_group_dropped(i);
1414 ip_ma_put(i); 1435 ip_ma_put(i);
1415
1416 write_lock_bh(&in_dev->mc_list_lock);
1417 } 1436 }
1418 write_unlock_bh(&in_dev->mc_list_lock);
1419} 1437}
1420 1438
1421/* RTNL is locked */ 1439/* RTNL is locked */
1422static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) 1440static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
1423{ 1441{
1424 struct flowi fl = { .nl_u = { .ip4_u = 1442 struct flowi fl = { .fl4_dst = imr->imr_multiaddr.s_addr };
1425 { .daddr = imr->imr_multiaddr.s_addr } } };
1426 struct rtable *rt; 1443 struct rtable *rt;
1427 struct net_device *dev = NULL; 1444 struct net_device *dev = NULL;
1428 struct in_device *idev = NULL; 1445 struct in_device *idev = NULL;
@@ -1513,18 +1530,18 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1513 1530
1514 if (!in_dev) 1531 if (!in_dev)
1515 return -ENODEV; 1532 return -ENODEV;
1516 read_lock(&in_dev->mc_list_lock); 1533 rcu_read_lock();
1517 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 1534 for_each_pmc_rcu(in_dev, pmc) {
1518 if (*pmca == pmc->multiaddr) 1535 if (*pmca == pmc->multiaddr)
1519 break; 1536 break;
1520 } 1537 }
1521 if (!pmc) { 1538 if (!pmc) {
1522 /* MCA not found?? bug */ 1539 /* MCA not found?? bug */
1523 read_unlock(&in_dev->mc_list_lock); 1540 rcu_read_unlock();
1524 return -ESRCH; 1541 return -ESRCH;
1525 } 1542 }
1526 spin_lock_bh(&pmc->lock); 1543 spin_lock_bh(&pmc->lock);
1527 read_unlock(&in_dev->mc_list_lock); 1544 rcu_read_unlock();
1528#ifdef CONFIG_IP_MULTICAST 1545#ifdef CONFIG_IP_MULTICAST
1529 sf_markstate(pmc); 1546 sf_markstate(pmc);
1530#endif 1547#endif
@@ -1685,18 +1702,18 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1685 1702
1686 if (!in_dev) 1703 if (!in_dev)
1687 return -ENODEV; 1704 return -ENODEV;
1688 read_lock(&in_dev->mc_list_lock); 1705 rcu_read_lock();
1689 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 1706 for_each_pmc_rcu(in_dev, pmc) {
1690 if (*pmca == pmc->multiaddr) 1707 if (*pmca == pmc->multiaddr)
1691 break; 1708 break;
1692 } 1709 }
1693 if (!pmc) { 1710 if (!pmc) {
1694 /* MCA not found?? bug */ 1711 /* MCA not found?? bug */
1695 read_unlock(&in_dev->mc_list_lock); 1712 rcu_read_unlock();
1696 return -ESRCH; 1713 return -ESRCH;
1697 } 1714 }
1698 spin_lock_bh(&pmc->lock); 1715 spin_lock_bh(&pmc->lock);
1699 read_unlock(&in_dev->mc_list_lock); 1716 rcu_read_unlock();
1700 1717
1701#ifdef CONFIG_IP_MULTICAST 1718#ifdef CONFIG_IP_MULTICAST
1702 sf_markstate(pmc); 1719 sf_markstate(pmc);
@@ -1793,7 +1810,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1793 1810
1794 err = -EADDRINUSE; 1811 err = -EADDRINUSE;
1795 ifindex = imr->imr_ifindex; 1812 ifindex = imr->imr_ifindex;
1796 for (i = inet->mc_list; i; i = i->next) { 1813 for_each_pmc_rtnl(inet, i) {
1797 if (i->multi.imr_multiaddr.s_addr == addr && 1814 if (i->multi.imr_multiaddr.s_addr == addr &&
1798 i->multi.imr_ifindex == ifindex) 1815 i->multi.imr_ifindex == ifindex)
1799 goto done; 1816 goto done;
@@ -1807,7 +1824,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1807 goto done; 1824 goto done;
1808 1825
1809 memcpy(&iml->multi, imr, sizeof(*imr)); 1826 memcpy(&iml->multi, imr, sizeof(*imr));
1810 iml->next = inet->mc_list; 1827 iml->next_rcu = inet->mc_list;
1811 iml->sflist = NULL; 1828 iml->sflist = NULL;
1812 iml->sfmode = MCAST_EXCLUDE; 1829 iml->sfmode = MCAST_EXCLUDE;
1813 rcu_assign_pointer(inet->mc_list, iml); 1830 rcu_assign_pointer(inet->mc_list, iml);
@@ -1821,17 +1838,14 @@ EXPORT_SYMBOL(ip_mc_join_group);
1821 1838
1822static void ip_sf_socklist_reclaim(struct rcu_head *rp) 1839static void ip_sf_socklist_reclaim(struct rcu_head *rp)
1823{ 1840{
1824 struct ip_sf_socklist *psf; 1841 kfree(container_of(rp, struct ip_sf_socklist, rcu));
1825
1826 psf = container_of(rp, struct ip_sf_socklist, rcu);
1827 /* sk_omem_alloc should have been decreased by the caller*/ 1842 /* sk_omem_alloc should have been decreased by the caller*/
1828 kfree(psf);
1829} 1843}
1830 1844
1831static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, 1845static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1832 struct in_device *in_dev) 1846 struct in_device *in_dev)
1833{ 1847{
1834 struct ip_sf_socklist *psf = iml->sflist; 1848 struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
1835 int err; 1849 int err;
1836 1850
1837 if (psf == NULL) { 1851 if (psf == NULL) {
@@ -1851,11 +1865,8 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1851 1865
1852static void ip_mc_socklist_reclaim(struct rcu_head *rp) 1866static void ip_mc_socklist_reclaim(struct rcu_head *rp)
1853{ 1867{
1854 struct ip_mc_socklist *iml; 1868 kfree(container_of(rp, struct ip_mc_socklist, rcu));
1855
1856 iml = container_of(rp, struct ip_mc_socklist, rcu);
1857 /* sk_omem_alloc should have been decreased by the caller*/ 1869 /* sk_omem_alloc should have been decreased by the caller*/
1858 kfree(iml);
1859} 1870}
1860 1871
1861 1872
@@ -1866,7 +1877,8 @@ static void ip_mc_socklist_reclaim(struct rcu_head *rp)
1866int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) 1877int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1867{ 1878{
1868 struct inet_sock *inet = inet_sk(sk); 1879 struct inet_sock *inet = inet_sk(sk);
1869 struct ip_mc_socklist *iml, **imlp; 1880 struct ip_mc_socklist *iml;
1881 struct ip_mc_socklist __rcu **imlp;
1870 struct in_device *in_dev; 1882 struct in_device *in_dev;
1871 struct net *net = sock_net(sk); 1883 struct net *net = sock_net(sk);
1872 __be32 group = imr->imr_multiaddr.s_addr; 1884 __be32 group = imr->imr_multiaddr.s_addr;
@@ -1876,7 +1888,9 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1876 rtnl_lock(); 1888 rtnl_lock();
1877 in_dev = ip_mc_find_dev(net, imr); 1889 in_dev = ip_mc_find_dev(net, imr);
1878 ifindex = imr->imr_ifindex; 1890 ifindex = imr->imr_ifindex;
1879 for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) { 1891 for (imlp = &inet->mc_list;
1892 (iml = rtnl_dereference(*imlp)) != NULL;
1893 imlp = &iml->next_rcu) {
1880 if (iml->multi.imr_multiaddr.s_addr != group) 1894 if (iml->multi.imr_multiaddr.s_addr != group)
1881 continue; 1895 continue;
1882 if (ifindex) { 1896 if (ifindex) {
@@ -1888,7 +1902,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1888 1902
1889 (void) ip_mc_leave_src(sk, iml, in_dev); 1903 (void) ip_mc_leave_src(sk, iml, in_dev);
1890 1904
1891 rcu_assign_pointer(*imlp, iml->next); 1905 *imlp = iml->next_rcu;
1892 1906
1893 if (in_dev) 1907 if (in_dev)
1894 ip_mc_dec_group(in_dev, group); 1908 ip_mc_dec_group(in_dev, group);
@@ -1934,7 +1948,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1934 } 1948 }
1935 err = -EADDRNOTAVAIL; 1949 err = -EADDRNOTAVAIL;
1936 1950
1937 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 1951 for_each_pmc_rtnl(inet, pmc) {
1938 if ((pmc->multi.imr_multiaddr.s_addr == 1952 if ((pmc->multi.imr_multiaddr.s_addr ==
1939 imr.imr_multiaddr.s_addr) && 1953 imr.imr_multiaddr.s_addr) &&
1940 (pmc->multi.imr_ifindex == imr.imr_ifindex)) 1954 (pmc->multi.imr_ifindex == imr.imr_ifindex))
@@ -1958,7 +1972,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1958 pmc->sfmode = omode; 1972 pmc->sfmode = omode;
1959 } 1973 }
1960 1974
1961 psl = pmc->sflist; 1975 psl = rtnl_dereference(pmc->sflist);
1962 if (!add) { 1976 if (!add) {
1963 if (!psl) 1977 if (!psl)
1964 goto done; /* err = -EADDRNOTAVAIL */ 1978 goto done; /* err = -EADDRNOTAVAIL */
@@ -2077,7 +2091,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2077 goto done; 2091 goto done;
2078 } 2092 }
2079 2093
2080 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2094 for_each_pmc_rtnl(inet, pmc) {
2081 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr && 2095 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
2082 pmc->multi.imr_ifindex == imr.imr_ifindex) 2096 pmc->multi.imr_ifindex == imr.imr_ifindex)
2083 break; 2097 break;
@@ -2107,7 +2121,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2107 (void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr, 2121 (void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
2108 msf->imsf_fmode, 0, NULL, 0); 2122 msf->imsf_fmode, 0, NULL, 0);
2109 } 2123 }
2110 psl = pmc->sflist; 2124 psl = rtnl_dereference(pmc->sflist);
2111 if (psl) { 2125 if (psl) {
2112 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2126 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2113 psl->sl_count, psl->sl_addr, 0); 2127 psl->sl_count, psl->sl_addr, 0);
@@ -2155,7 +2169,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
2155 } 2169 }
2156 err = -EADDRNOTAVAIL; 2170 err = -EADDRNOTAVAIL;
2157 2171
2158 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2172 for_each_pmc_rtnl(inet, pmc) {
2159 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr && 2173 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
2160 pmc->multi.imr_ifindex == imr.imr_ifindex) 2174 pmc->multi.imr_ifindex == imr.imr_ifindex)
2161 break; 2175 break;
@@ -2163,7 +2177,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
2163 if (!pmc) /* must have a prior join */ 2177 if (!pmc) /* must have a prior join */
2164 goto done; 2178 goto done;
2165 msf->imsf_fmode = pmc->sfmode; 2179 msf->imsf_fmode = pmc->sfmode;
2166 psl = pmc->sflist; 2180 psl = rtnl_dereference(pmc->sflist);
2167 rtnl_unlock(); 2181 rtnl_unlock();
2168 if (!psl) { 2182 if (!psl) {
2169 len = 0; 2183 len = 0;
@@ -2208,7 +2222,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
2208 2222
2209 err = -EADDRNOTAVAIL; 2223 err = -EADDRNOTAVAIL;
2210 2224
2211 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2225 for_each_pmc_rtnl(inet, pmc) {
2212 if (pmc->multi.imr_multiaddr.s_addr == addr && 2226 if (pmc->multi.imr_multiaddr.s_addr == addr &&
2213 pmc->multi.imr_ifindex == gsf->gf_interface) 2227 pmc->multi.imr_ifindex == gsf->gf_interface)
2214 break; 2228 break;
@@ -2216,7 +2230,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
2216 if (!pmc) /* must have a prior join */ 2230 if (!pmc) /* must have a prior join */
2217 goto done; 2231 goto done;
2218 gsf->gf_fmode = pmc->sfmode; 2232 gsf->gf_fmode = pmc->sfmode;
2219 psl = pmc->sflist; 2233 psl = rtnl_dereference(pmc->sflist);
2220 rtnl_unlock(); 2234 rtnl_unlock();
2221 count = psl ? psl->sl_count : 0; 2235 count = psl ? psl->sl_count : 0;
2222 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; 2236 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
@@ -2257,7 +2271,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2257 goto out; 2271 goto out;
2258 2272
2259 rcu_read_lock(); 2273 rcu_read_lock();
2260 for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) { 2274 for_each_pmc_rcu(inet, pmc) {
2261 if (pmc->multi.imr_multiaddr.s_addr == loc_addr && 2275 if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
2262 pmc->multi.imr_ifindex == dif) 2276 pmc->multi.imr_ifindex == dif)
2263 break; 2277 break;
@@ -2265,7 +2279,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2265 ret = inet->mc_all; 2279 ret = inet->mc_all;
2266 if (!pmc) 2280 if (!pmc)
2267 goto unlock; 2281 goto unlock;
2268 psl = pmc->sflist; 2282 psl = rcu_dereference(pmc->sflist);
2269 ret = (pmc->sfmode == MCAST_EXCLUDE); 2283 ret = (pmc->sfmode == MCAST_EXCLUDE);
2270 if (!psl) 2284 if (!psl)
2271 goto unlock; 2285 goto unlock;
@@ -2300,10 +2314,10 @@ void ip_mc_drop_socket(struct sock *sk)
2300 return; 2314 return;
2301 2315
2302 rtnl_lock(); 2316 rtnl_lock();
2303 while ((iml = inet->mc_list) != NULL) { 2317 while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
2304 struct in_device *in_dev; 2318 struct in_device *in_dev;
2305 rcu_assign_pointer(inet->mc_list, iml->next);
2306 2319
2320 inet->mc_list = iml->next_rcu;
2307 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2321 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2308 (void) ip_mc_leave_src(sk, iml, in_dev); 2322 (void) ip_mc_leave_src(sk, iml, in_dev);
2309 if (in_dev != NULL) 2323 if (in_dev != NULL)
@@ -2321,8 +2335,8 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
2321 struct ip_sf_list *psf; 2335 struct ip_sf_list *psf;
2322 int rv = 0; 2336 int rv = 0;
2323 2337
2324 read_lock(&in_dev->mc_list_lock); 2338 rcu_read_lock();
2325 for (im=in_dev->mc_list; im; im=im->next) { 2339 for_each_pmc_rcu(in_dev, im) {
2326 if (im->multiaddr == mc_addr) 2340 if (im->multiaddr == mc_addr)
2327 break; 2341 break;
2328 } 2342 }
@@ -2343,7 +2357,7 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
2343 } else 2357 } else
2344 rv = 1; /* unspecified source; tentatively allow */ 2358 rv = 1; /* unspecified source; tentatively allow */
2345 } 2359 }
2346 read_unlock(&in_dev->mc_list_lock); 2360 rcu_read_unlock();
2347 return rv; 2361 return rv;
2348} 2362}
2349 2363
@@ -2369,13 +2383,11 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2369 in_dev = __in_dev_get_rcu(state->dev); 2383 in_dev = __in_dev_get_rcu(state->dev);
2370 if (!in_dev) 2384 if (!in_dev)
2371 continue; 2385 continue;
2372 read_lock(&in_dev->mc_list_lock); 2386 im = rcu_dereference(in_dev->mc_list);
2373 im = in_dev->mc_list;
2374 if (im) { 2387 if (im) {
2375 state->in_dev = in_dev; 2388 state->in_dev = in_dev;
2376 break; 2389 break;
2377 } 2390 }
2378 read_unlock(&in_dev->mc_list_lock);
2379 } 2391 }
2380 return im; 2392 return im;
2381} 2393}
@@ -2383,11 +2395,9 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2383static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im) 2395static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
2384{ 2396{
2385 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2397 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2386 im = im->next;
2387 while (!im) {
2388 if (likely(state->in_dev != NULL))
2389 read_unlock(&state->in_dev->mc_list_lock);
2390 2398
2399 im = rcu_dereference(im->next_rcu);
2400 while (!im) {
2391 state->dev = next_net_device_rcu(state->dev); 2401 state->dev = next_net_device_rcu(state->dev);
2392 if (!state->dev) { 2402 if (!state->dev) {
2393 state->in_dev = NULL; 2403 state->in_dev = NULL;
@@ -2396,8 +2406,7 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
2396 state->in_dev = __in_dev_get_rcu(state->dev); 2406 state->in_dev = __in_dev_get_rcu(state->dev);
2397 if (!state->in_dev) 2407 if (!state->in_dev)
2398 continue; 2408 continue;
2399 read_lock(&state->in_dev->mc_list_lock); 2409 im = rcu_dereference(state->in_dev->mc_list);
2400 im = state->in_dev->mc_list;
2401 } 2410 }
2402 return im; 2411 return im;
2403} 2412}
@@ -2433,10 +2442,8 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
2433 __releases(rcu) 2442 __releases(rcu)
2434{ 2443{
2435 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2444 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2436 if (likely(state->in_dev != NULL)) { 2445
2437 read_unlock(&state->in_dev->mc_list_lock); 2446 state->in_dev = NULL;
2438 state->in_dev = NULL;
2439 }
2440 state->dev = NULL; 2447 state->dev = NULL;
2441 rcu_read_unlock(); 2448 rcu_read_unlock();
2442} 2449}
@@ -2458,7 +2465,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
2458 querier = "NONE"; 2465 querier = "NONE";
2459#endif 2466#endif
2460 2467
2461 if (state->in_dev->mc_list == im) { 2468 if (rcu_dereference(state->in_dev->mc_list) == im) {
2462 seq_printf(seq, "%d\t%-10s: %5d %7s\n", 2469 seq_printf(seq, "%d\t%-10s: %5d %7s\n",
2463 state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier); 2470 state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
2464 } 2471 }
@@ -2517,8 +2524,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2517 idev = __in_dev_get_rcu(state->dev); 2524 idev = __in_dev_get_rcu(state->dev);
2518 if (unlikely(idev == NULL)) 2525 if (unlikely(idev == NULL))
2519 continue; 2526 continue;
2520 read_lock(&idev->mc_list_lock); 2527 im = rcu_dereference(idev->mc_list);
2521 im = idev->mc_list;
2522 if (likely(im != NULL)) { 2528 if (likely(im != NULL)) {
2523 spin_lock_bh(&im->lock); 2529 spin_lock_bh(&im->lock);
2524 psf = im->sources; 2530 psf = im->sources;
@@ -2529,7 +2535,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2529 } 2535 }
2530 spin_unlock_bh(&im->lock); 2536 spin_unlock_bh(&im->lock);
2531 } 2537 }
2532 read_unlock(&idev->mc_list_lock);
2533 } 2538 }
2534 return psf; 2539 return psf;
2535} 2540}
@@ -2543,9 +2548,6 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
2543 spin_unlock_bh(&state->im->lock); 2548 spin_unlock_bh(&state->im->lock);
2544 state->im = state->im->next; 2549 state->im = state->im->next;
2545 while (!state->im) { 2550 while (!state->im) {
2546 if (likely(state->idev != NULL))
2547 read_unlock(&state->idev->mc_list_lock);
2548
2549 state->dev = next_net_device_rcu(state->dev); 2551 state->dev = next_net_device_rcu(state->dev);
2550 if (!state->dev) { 2552 if (!state->dev) {
2551 state->idev = NULL; 2553 state->idev = NULL;
@@ -2554,8 +2556,7 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
2554 state->idev = __in_dev_get_rcu(state->dev); 2556 state->idev = __in_dev_get_rcu(state->dev);
2555 if (!state->idev) 2557 if (!state->idev)
2556 continue; 2558 continue;
2557 read_lock(&state->idev->mc_list_lock); 2559 state->im = rcu_dereference(state->idev->mc_list);
2558 state->im = state->idev->mc_list;
2559 } 2560 }
2560 if (!state->im) 2561 if (!state->im)
2561 break; 2562 break;
@@ -2601,10 +2602,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
2601 spin_unlock_bh(&state->im->lock); 2602 spin_unlock_bh(&state->im->lock);
2602 state->im = NULL; 2603 state->im = NULL;
2603 } 2604 }
2604 if (likely(state->idev != NULL)) { 2605 state->idev = NULL;
2605 read_unlock(&state->idev->mc_list_lock);
2606 state->idev = NULL;
2607 }
2608 state->dev = NULL; 2606 state->dev = NULL;
2609 rcu_read_unlock(); 2607 rcu_read_unlock();
2610} 2608}
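
Both seq_file walkers in the igmp.c hunks above (igmp_mc_* and igmp_mcf_*) stop taking in_dev->mc_list_lock for reading and instead fetch mc_list through rcu_dereference(), relying on the rcu_read_lock() the iterators already hold. A minimal userspace sketch of that reader pattern follows; the toy_rcu_* macros are deliberately empty stand-ins for the kernel primitives (which annotate and order accesses rather than block), and the structs are cut down to just the fields the illustration needs.

    /* Cut-down model of the lock-free reader side the igmp hunks converge on. */
    #include <stdio.h>

    struct ip_mc_list {
        unsigned int multiaddr;      /* group address (host order here) */
        struct ip_mc_list *next;     /* __rcu in the kernel */
    };

    struct in_device {
        struct ip_mc_list *mc_list;  /* __rcu in the kernel */
    };

    /* Stand-ins: in the kernel these are rcu_read_lock()/rcu_read_unlock()
     * and rcu_dereference(); they never block, unlike the old reader lock. */
    #define toy_rcu_read_lock()    do { } while (0)
    #define toy_rcu_read_unlock()  do { } while (0)
    #define toy_rcu_dereference(p) (p)

    static void dump_groups(struct in_device *in_dev)
    {
        struct ip_mc_list *im;

        toy_rcu_read_lock();
        for (im = toy_rcu_dereference(in_dev->mc_list); im;
             im = toy_rcu_dereference(im->next))
            printf("group 0x%08x\n", im->multiaddr);
        toy_rcu_read_unlock();
    }

    int main(void)
    {
        struct ip_mc_list b = { 0xe0000002, NULL };
        struct ip_mc_list a = { 0xe0000001, &b };
        struct in_device dev = { &a };

        dump_groups(&dev);
        return 0;
    }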
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7174370b119..06f5f8f482f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -358,17 +358,14 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
358 struct ip_options *opt = inet_rsk(req)->opt; 358 struct ip_options *opt = inet_rsk(req)->opt;
359 struct flowi fl = { .oif = sk->sk_bound_dev_if, 359 struct flowi fl = { .oif = sk->sk_bound_dev_if,
360 .mark = sk->sk_mark, 360 .mark = sk->sk_mark,
361 .nl_u = { .ip4_u = 361 .fl4_dst = ((opt && opt->srr) ?
362 { .daddr = ((opt && opt->srr) ? 362 opt->faddr : ireq->rmt_addr),
363 opt->faddr : 363 .fl4_src = ireq->loc_addr,
364 ireq->rmt_addr), 364 .fl4_tos = RT_CONN_FLAGS(sk),
365 .saddr = ireq->loc_addr,
366 .tos = RT_CONN_FLAGS(sk) } },
367 .proto = sk->sk_protocol, 365 .proto = sk->sk_protocol,
368 .flags = inet_sk_flowi_flags(sk), 366 .flags = inet_sk_flowi_flags(sk),
369 .uli_u = { .ports = 367 .fl_ip_sport = inet_sk(sk)->inet_sport,
370 { .sport = inet_sk(sk)->inet_sport, 368 .fl_ip_dport = ireq->rmt_port };
371 .dport = ireq->rmt_port } } };
372 struct net *net = sock_net(sk); 369 struct net *net = sock_net(sk);
373 370
374 security_req_classify_flow(req, &fl); 371 security_req_classify_flow(req, &fl);
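
This inet_csk_route_req() hunk, like the matching ones later in ip_output.c, raw.c, syncookies.c, ipip.c, ipmr.c, netfilter.c and route.c, rewrites the nested .nl_u = { .ip4_u = { ... } } and .uli_u = { .ports = { ... } } initializers using the flat fl4_dst/fl4_src/fl4_tos and fl_ip_sport/fl_ip_dport names. Those flat names already exist as aliases for the nested members (the unmodified route.c code below reads rth->fl.fl4_dst, for example), so the rewrite changes only the spelling of the initializers. A compilable sketch of that equivalence, over a cut-down toy_flowi rather than the real struct flowi:

    #include <stdio.h>

    struct toy_flowi {
        int oif;
        unsigned int mark;
        union {
            struct {
                unsigned int daddr;
                unsigned int saddr;
                unsigned char tos;
            } ip4_u;
        } nl_u;
        union {
            struct {
                unsigned short sport;
                unsigned short dport;
            } ports;
        } uli_u;
    /* Flat aliases over the nested members, mirroring the kernel defines
     * that the new-style initializers in this diff rely on. */
    #define fl4_dst     nl_u.ip4_u.daddr
    #define fl4_src     nl_u.ip4_u.saddr
    #define fl4_tos     nl_u.ip4_u.tos
    #define fl_ip_sport uli_u.ports.sport
    #define fl_ip_dport uli_u.ports.dport
    };

    int main(void)
    {
        /* Old style: spell out the nested unions. */
        struct toy_flowi old_fl = {
            .oif  = 1,
            .nl_u = { .ip4_u = { .daddr = 0x0a000001,
                                 .saddr = 0x0a000002,
                                 .tos   = 0x10 } },
            .uli_u = { .ports = { .sport = 1024, .dport = 80 } },
        };
        /* New style: same storage, initialized through the flat aliases. */
        struct toy_flowi new_fl = {
            .oif         = 1,
            .fl4_dst     = 0x0a000001,
            .fl4_src     = 0x0a000002,
            .fl4_tos     = 0x10,
            .fl_ip_sport = 1024,
            .fl_ip_dport = 80,
        };

        printf("old daddr %#x dport %d, new daddr %#x dport %d\n",
               old_fl.nl_u.ip4_u.daddr, old_fl.uli_u.ports.dport,
               new_fl.fl4_dst, new_fl.fl_ip_dport);
        return 0;
    }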
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9e94d7cf4f8..d9bc85751c7 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -63,7 +63,7 @@
63 * refcnt: atomically against modifications on other CPU; 63 * refcnt: atomically against modifications on other CPU;
64 * usually under some other lock to prevent node disappearing 64 * usually under some other lock to prevent node disappearing
65 * dtime: unused node list lock 65 * dtime: unused node list lock
66 * v4daddr: unchangeable 66 * daddr: unchangeable
67 * ip_id_count: atomic value (no lock needed) 67 * ip_id_count: atomic value (no lock needed)
68 */ 68 */
69 69
@@ -79,15 +79,24 @@ static const struct inet_peer peer_fake_node = {
79 .avl_height = 0 79 .avl_height = 0
80}; 80};
81 81
82static struct { 82struct inet_peer_base {
83 struct inet_peer __rcu *root; 83 struct inet_peer __rcu *root;
84 spinlock_t lock; 84 spinlock_t lock;
85 int total; 85 int total;
86} peers = { 86};
87
88static struct inet_peer_base v4_peers = {
89 .root = peer_avl_empty_rcu,
90 .lock = __SPIN_LOCK_UNLOCKED(v4_peers.lock),
91 .total = 0,
92};
93
94static struct inet_peer_base v6_peers = {
87 .root = peer_avl_empty_rcu, 95 .root = peer_avl_empty_rcu,
88 .lock = __SPIN_LOCK_UNLOCKED(peers.lock), 96 .lock = __SPIN_LOCK_UNLOCKED(v6_peers.lock),
89 .total = 0, 97 .total = 0,
90}; 98};
99
91#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ 100#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
92 101
93/* Exported for sysctl_net_ipv4. */ 102/* Exported for sysctl_net_ipv4. */
@@ -152,28 +161,45 @@ static void unlink_from_unused(struct inet_peer *p)
152 } 161 }
153} 162}
154 163
164static int addr_compare(const struct inetpeer_addr *a,
165 const struct inetpeer_addr *b)
166{
167 int i, n = (a->family == AF_INET ? 1 : 4);
168
169 for (i = 0; i < n; i++) {
170 if (a->a6[i] == b->a6[i])
171 continue;
172 if (a->a6[i] < b->a6[i])
173 return -1;
174 return 1;
175 }
176
177 return 0;
178}
179
155/* 180/*
156 * Called with local BH disabled and the pool lock held. 181 * Called with local BH disabled and the pool lock held.
157 */ 182 */
158#define lookup(_daddr, _stack) \ 183#define lookup(_daddr, _stack, _base) \
159({ \ 184({ \
160 struct inet_peer *u; \ 185 struct inet_peer *u; \
161 struct inet_peer __rcu **v; \ 186 struct inet_peer __rcu **v; \
162 \ 187 \
163 stackptr = _stack; \ 188 stackptr = _stack; \
164 *stackptr++ = &peers.root; \ 189 *stackptr++ = &_base->root; \
165 for (u = rcu_dereference_protected(peers.root, \ 190 for (u = rcu_dereference_protected(_base->root, \
166 lockdep_is_held(&peers.lock)); \ 191 lockdep_is_held(&_base->lock)); \
167 u != peer_avl_empty; ) { \ 192 u != peer_avl_empty; ) { \
168 if (_daddr == u->v4daddr) \ 193 int cmp = addr_compare(_daddr, &u->daddr); \
194 if (cmp == 0) \
169 break; \ 195 break; \
170 if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \ 196 if (cmp == -1) \
171 v = &u->avl_left; \ 197 v = &u->avl_left; \
172 else \ 198 else \
173 v = &u->avl_right; \ 199 v = &u->avl_right; \
174 *stackptr++ = v; \ 200 *stackptr++ = v; \
175 u = rcu_dereference_protected(*v, \ 201 u = rcu_dereference_protected(*v, \
176 lockdep_is_held(&peers.lock)); \ 202 lockdep_is_held(&_base->lock)); \
177 } \ 203 } \
178 u; \ 204 u; \
179}) 205})
@@ -185,13 +211,15 @@ static void unlink_from_unused(struct inet_peer *p)
185 * But every pointer we follow is guaranteed to be valid thanks to RCU. 211 * But every pointer we follow is guaranteed to be valid thanks to RCU.
186 * We exit from this function if number of links exceeds PEER_MAXDEPTH 212 * We exit from this function if number of links exceeds PEER_MAXDEPTH
187 */ 213 */
188static struct inet_peer *lookup_rcu_bh(__be32 daddr) 214static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
215 struct inet_peer_base *base)
189{ 216{
190 struct inet_peer *u = rcu_dereference_bh(peers.root); 217 struct inet_peer *u = rcu_dereference_bh(base->root);
191 int count = 0; 218 int count = 0;
192 219
193 while (u != peer_avl_empty) { 220 while (u != peer_avl_empty) {
194 if (daddr == u->v4daddr) { 221 int cmp = addr_compare(daddr, &u->daddr);
222 if (cmp == 0) {
195 /* Before taking a reference, check if this entry was 223 /* Before taking a reference, check if this entry was
196 * deleted, unlink_from_pool() sets refcnt=-1 to make 224 * deleted, unlink_from_pool() sets refcnt=-1 to make
197 * distinction between an unused entry (refcnt=0) and 225 * distinction between an unused entry (refcnt=0) and
@@ -201,7 +229,7 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
201 u = NULL; 229 u = NULL;
202 return u; 230 return u;
203 } 231 }
204 if ((__force __u32)daddr < (__force __u32)u->v4daddr) 232 if (cmp == -1)
205 u = rcu_dereference_bh(u->avl_left); 233 u = rcu_dereference_bh(u->avl_left);
206 else 234 else
207 u = rcu_dereference_bh(u->avl_right); 235 u = rcu_dereference_bh(u->avl_right);
@@ -212,19 +240,19 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
212} 240}
213 241
214/* Called with local BH disabled and the pool lock held. */ 242/* Called with local BH disabled and the pool lock held. */
215#define lookup_rightempty(start) \ 243#define lookup_rightempty(start, base) \
216({ \ 244({ \
217 struct inet_peer *u; \ 245 struct inet_peer *u; \
218 struct inet_peer __rcu **v; \ 246 struct inet_peer __rcu **v; \
219 *stackptr++ = &start->avl_left; \ 247 *stackptr++ = &start->avl_left; \
220 v = &start->avl_left; \ 248 v = &start->avl_left; \
221 for (u = rcu_dereference_protected(*v, \ 249 for (u = rcu_dereference_protected(*v, \
222 lockdep_is_held(&peers.lock)); \ 250 lockdep_is_held(&base->lock)); \
223 u->avl_right != peer_avl_empty_rcu; ) { \ 251 u->avl_right != peer_avl_empty_rcu; ) { \
224 v = &u->avl_right; \ 252 v = &u->avl_right; \
225 *stackptr++ = v; \ 253 *stackptr++ = v; \
226 u = rcu_dereference_protected(*v, \ 254 u = rcu_dereference_protected(*v, \
227 lockdep_is_held(&peers.lock)); \ 255 lockdep_is_held(&base->lock)); \
228 } \ 256 } \
229 u; \ 257 u; \
230}) 258})
@@ -234,7 +262,8 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
234 * Look into mm/map_avl.c for more detail description of the ideas. 262 * Look into mm/map_avl.c for more detail description of the ideas.
235 */ 263 */
236static void peer_avl_rebalance(struct inet_peer __rcu **stack[], 264static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
237 struct inet_peer __rcu ***stackend) 265 struct inet_peer __rcu ***stackend,
266 struct inet_peer_base *base)
238{ 267{
239 struct inet_peer __rcu **nodep; 268 struct inet_peer __rcu **nodep;
240 struct inet_peer *node, *l, *r; 269 struct inet_peer *node, *l, *r;
@@ -243,20 +272,20 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
243 while (stackend > stack) { 272 while (stackend > stack) {
244 nodep = *--stackend; 273 nodep = *--stackend;
245 node = rcu_dereference_protected(*nodep, 274 node = rcu_dereference_protected(*nodep,
246 lockdep_is_held(&peers.lock)); 275 lockdep_is_held(&base->lock));
247 l = rcu_dereference_protected(node->avl_left, 276 l = rcu_dereference_protected(node->avl_left,
248 lockdep_is_held(&peers.lock)); 277 lockdep_is_held(&base->lock));
249 r = rcu_dereference_protected(node->avl_right, 278 r = rcu_dereference_protected(node->avl_right,
250 lockdep_is_held(&peers.lock)); 279 lockdep_is_held(&base->lock));
251 lh = node_height(l); 280 lh = node_height(l);
252 rh = node_height(r); 281 rh = node_height(r);
253 if (lh > rh + 1) { /* l: RH+2 */ 282 if (lh > rh + 1) { /* l: RH+2 */
254 struct inet_peer *ll, *lr, *lrl, *lrr; 283 struct inet_peer *ll, *lr, *lrl, *lrr;
255 int lrh; 284 int lrh;
256 ll = rcu_dereference_protected(l->avl_left, 285 ll = rcu_dereference_protected(l->avl_left,
257 lockdep_is_held(&peers.lock)); 286 lockdep_is_held(&base->lock));
258 lr = rcu_dereference_protected(l->avl_right, 287 lr = rcu_dereference_protected(l->avl_right,
259 lockdep_is_held(&peers.lock)); 288 lockdep_is_held(&base->lock));
260 lrh = node_height(lr); 289 lrh = node_height(lr);
261 if (lrh <= node_height(ll)) { /* ll: RH+1 */ 290 if (lrh <= node_height(ll)) { /* ll: RH+1 */
262 RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */ 291 RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */
@@ -268,9 +297,9 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
268 RCU_INIT_POINTER(*nodep, l); 297 RCU_INIT_POINTER(*nodep, l);
269 } else { /* ll: RH, lr: RH+1 */ 298 } else { /* ll: RH, lr: RH+1 */
270 lrl = rcu_dereference_protected(lr->avl_left, 299 lrl = rcu_dereference_protected(lr->avl_left,
271 lockdep_is_held(&peers.lock)); /* lrl: RH or RH-1 */ 300 lockdep_is_held(&base->lock)); /* lrl: RH or RH-1 */
272 lrr = rcu_dereference_protected(lr->avl_right, 301 lrr = rcu_dereference_protected(lr->avl_right,
273 lockdep_is_held(&peers.lock)); /* lrr: RH or RH-1 */ 302 lockdep_is_held(&base->lock)); /* lrr: RH or RH-1 */
274 RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */ 303 RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */
275 RCU_INIT_POINTER(node->avl_right, r); /* r: RH */ 304 RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
276 node->avl_height = rh + 1; /* node: RH+1 */ 305 node->avl_height = rh + 1; /* node: RH+1 */
@@ -286,9 +315,9 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
286 struct inet_peer *rr, *rl, *rlr, *rll; 315 struct inet_peer *rr, *rl, *rlr, *rll;
287 int rlh; 316 int rlh;
288 rr = rcu_dereference_protected(r->avl_right, 317 rr = rcu_dereference_protected(r->avl_right,
289 lockdep_is_held(&peers.lock)); 318 lockdep_is_held(&base->lock));
290 rl = rcu_dereference_protected(r->avl_left, 319 rl = rcu_dereference_protected(r->avl_left,
291 lockdep_is_held(&peers.lock)); 320 lockdep_is_held(&base->lock));
292 rlh = node_height(rl); 321 rlh = node_height(rl);
293 if (rlh <= node_height(rr)) { /* rr: LH+1 */ 322 if (rlh <= node_height(rr)) { /* rr: LH+1 */
294 RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */ 323 RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */
@@ -300,9 +329,9 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
300 RCU_INIT_POINTER(*nodep, r); 329 RCU_INIT_POINTER(*nodep, r);
301 } else { /* rr: RH, rl: RH+1 */ 330 } else { /* rr: RH, rl: RH+1 */
302 rlr = rcu_dereference_protected(rl->avl_right, 331 rlr = rcu_dereference_protected(rl->avl_right,
303 lockdep_is_held(&peers.lock)); /* rlr: LH or LH-1 */ 332 lockdep_is_held(&base->lock)); /* rlr: LH or LH-1 */
304 rll = rcu_dereference_protected(rl->avl_left, 333 rll = rcu_dereference_protected(rl->avl_left,
305 lockdep_is_held(&peers.lock)); /* rll: LH or LH-1 */ 334 lockdep_is_held(&base->lock)); /* rll: LH or LH-1 */
306 RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */ 335 RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
307 RCU_INIT_POINTER(node->avl_left, l); /* l: LH */ 336 RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
308 node->avl_height = lh + 1; /* node: LH+1 */ 337 node->avl_height = lh + 1; /* node: LH+1 */
@@ -321,14 +350,14 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
321} 350}
322 351
323/* Called with local BH disabled and the pool lock held. */ 352/* Called with local BH disabled and the pool lock held. */
324#define link_to_pool(n) \ 353#define link_to_pool(n, base) \
325do { \ 354do { \
326 n->avl_height = 1; \ 355 n->avl_height = 1; \
327 n->avl_left = peer_avl_empty_rcu; \ 356 n->avl_left = peer_avl_empty_rcu; \
328 n->avl_right = peer_avl_empty_rcu; \ 357 n->avl_right = peer_avl_empty_rcu; \
329 /* lockless readers can catch us now */ \ 358 /* lockless readers can catch us now */ \
330 rcu_assign_pointer(**--stackptr, n); \ 359 rcu_assign_pointer(**--stackptr, n); \
331 peer_avl_rebalance(stack, stackptr); \ 360 peer_avl_rebalance(stack, stackptr, base); \
332} while (0) 361} while (0)
333 362
334static void inetpeer_free_rcu(struct rcu_head *head) 363static void inetpeer_free_rcu(struct rcu_head *head)
@@ -337,13 +366,13 @@ static void inetpeer_free_rcu(struct rcu_head *head)
337} 366}
338 367
339/* May be called with local BH enabled. */ 368/* May be called with local BH enabled. */
340static void unlink_from_pool(struct inet_peer *p) 369static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
341{ 370{
342 int do_free; 371 int do_free;
343 372
344 do_free = 0; 373 do_free = 0;
345 374
346 spin_lock_bh(&peers.lock); 375 spin_lock_bh(&base->lock);
347 /* Check the reference counter. It was artificially incremented by 1 376 /* Check the reference counter. It was artificially incremented by 1
348 * in cleanup() function to prevent sudden disappearing. If we can 377 * in cleanup() function to prevent sudden disappearing. If we can
349 * atomically (because of lockless readers) take this last reference, 378 * atomically (because of lockless readers) take this last reference,
@@ -353,7 +382,7 @@ static void unlink_from_pool(struct inet_peer *p)
353 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { 382 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
354 struct inet_peer __rcu **stack[PEER_MAXDEPTH]; 383 struct inet_peer __rcu **stack[PEER_MAXDEPTH];
355 struct inet_peer __rcu ***stackptr, ***delp; 384 struct inet_peer __rcu ***stackptr, ***delp;
356 if (lookup(p->v4daddr, stack) != p) 385 if (lookup(&p->daddr, stack, base) != p)
357 BUG(); 386 BUG();
358 delp = stackptr - 1; /* *delp[0] == p */ 387 delp = stackptr - 1; /* *delp[0] == p */
359 if (p->avl_left == peer_avl_empty_rcu) { 388 if (p->avl_left == peer_avl_empty_rcu) {
@@ -362,11 +391,11 @@ static void unlink_from_pool(struct inet_peer *p)
362 } else { 391 } else {
363 /* look for a node to insert instead of p */ 392 /* look for a node to insert instead of p */
364 struct inet_peer *t; 393 struct inet_peer *t;
365 t = lookup_rightempty(p); 394 t = lookup_rightempty(p, base);
366 BUG_ON(rcu_dereference_protected(*stackptr[-1], 395 BUG_ON(rcu_dereference_protected(*stackptr[-1],
367 lockdep_is_held(&peers.lock)) != t); 396 lockdep_is_held(&base->lock)) != t);
368 **--stackptr = t->avl_left; 397 **--stackptr = t->avl_left;
369 /* t is removed, t->v4daddr > x->v4daddr for any 398 /* t is removed, t->daddr > x->daddr for any
370 * x in p->avl_left subtree. 399 * x in p->avl_left subtree.
371 * Put t in the old place of p. */ 400 * Put t in the old place of p. */
372 RCU_INIT_POINTER(*delp[0], t); 401 RCU_INIT_POINTER(*delp[0], t);
@@ -376,11 +405,11 @@ static void unlink_from_pool(struct inet_peer *p)
376 BUG_ON(delp[1] != &p->avl_left); 405 BUG_ON(delp[1] != &p->avl_left);
377 delp[1] = &t->avl_left; /* was &p->avl_left */ 406 delp[1] = &t->avl_left; /* was &p->avl_left */
378 } 407 }
379 peer_avl_rebalance(stack, stackptr); 408 peer_avl_rebalance(stack, stackptr, base);
380 peers.total--; 409 base->total--;
381 do_free = 1; 410 do_free = 1;
382 } 411 }
383 spin_unlock_bh(&peers.lock); 412 spin_unlock_bh(&base->lock);
384 413
385 if (do_free) 414 if (do_free)
386 call_rcu_bh(&p->rcu, inetpeer_free_rcu); 415 call_rcu_bh(&p->rcu, inetpeer_free_rcu);
@@ -395,6 +424,16 @@ static void unlink_from_pool(struct inet_peer *p)
395 inet_putpeer(p); 424 inet_putpeer(p);
396} 425}
397 426
427static struct inet_peer_base *family_to_base(int family)
428{
429 return (family == AF_INET ? &v4_peers : &v6_peers);
430}
431
432static struct inet_peer_base *peer_to_base(struct inet_peer *p)
433{
434 return family_to_base(p->daddr.family);
435}
436
398/* May be called with local BH enabled. */ 437/* May be called with local BH enabled. */
399static int cleanup_once(unsigned long ttl) 438static int cleanup_once(unsigned long ttl)
400{ 439{
@@ -428,21 +467,22 @@ static int cleanup_once(unsigned long ttl)
428 * happen because of entry limits in route cache. */ 467 * happen because of entry limits in route cache. */
429 return -1; 468 return -1;
430 469
431 unlink_from_pool(p); 470 unlink_from_pool(p, peer_to_base(p));
432 return 0; 471 return 0;
433} 472}
434 473
435/* Called with or without local BH being disabled. */ 474/* Called with or without local BH being disabled. */
436struct inet_peer *inet_getpeer(__be32 daddr, int create) 475struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
437{ 476{
438 struct inet_peer *p;
439 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; 477 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
478 struct inet_peer_base *base = family_to_base(AF_INET);
479 struct inet_peer *p;
440 480
441 /* Look up for the address quickly, lockless. 481 /* Look up for the address quickly, lockless.
442 * Because of a concurrent writer, we might not find an existing entry. 482 * Because of a concurrent writer, we might not find an existing entry.
443 */ 483 */
444 rcu_read_lock_bh(); 484 rcu_read_lock_bh();
445 p = lookup_rcu_bh(daddr); 485 p = lookup_rcu_bh(daddr, base);
446 rcu_read_unlock_bh(); 486 rcu_read_unlock_bh();
447 487
448 if (p) { 488 if (p) {
@@ -456,50 +496,57 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
456 /* retry an exact lookup, taking the lock before. 496 /* retry an exact lookup, taking the lock before.
457 * At least, nodes should be hot in our cache. 497 * At least, nodes should be hot in our cache.
458 */ 498 */
459 spin_lock_bh(&peers.lock); 499 spin_lock_bh(&base->lock);
460 p = lookup(daddr, stack); 500 p = lookup(daddr, stack, base);
461 if (p != peer_avl_empty) { 501 if (p != peer_avl_empty) {
462 atomic_inc(&p->refcnt); 502 atomic_inc(&p->refcnt);
463 spin_unlock_bh(&peers.lock); 503 spin_unlock_bh(&base->lock);
464 /* Remove the entry from unused list if it was there. */ 504 /* Remove the entry from unused list if it was there. */
465 unlink_from_unused(p); 505 unlink_from_unused(p);
466 return p; 506 return p;
467 } 507 }
468 p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL; 508 p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
469 if (p) { 509 if (p) {
470 p->v4daddr = daddr; 510 p->daddr = *daddr;
471 atomic_set(&p->refcnt, 1); 511 atomic_set(&p->refcnt, 1);
472 atomic_set(&p->rid, 0); 512 atomic_set(&p->rid, 0);
473 atomic_set(&p->ip_id_count, secure_ip_id(daddr)); 513 atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
474 p->tcp_ts_stamp = 0; 514 p->tcp_ts_stamp = 0;
475 INIT_LIST_HEAD(&p->unused); 515 INIT_LIST_HEAD(&p->unused);
476 516
477 517
478 /* Link the node. */ 518 /* Link the node. */
479 link_to_pool(p); 519 link_to_pool(p, base);
480 peers.total++; 520 base->total++;
481 } 521 }
482 spin_unlock_bh(&peers.lock); 522 spin_unlock_bh(&base->lock);
483 523
484 if (peers.total >= inet_peer_threshold) 524 if (base->total >= inet_peer_threshold)
485 /* Remove one less-recently-used entry. */ 525 /* Remove one less-recently-used entry. */
486 cleanup_once(0); 526 cleanup_once(0);
487 527
488 return p; 528 return p;
489} 529}
490 530
531static int compute_total(void)
532{
533 return v4_peers.total + v6_peers.total;
534}
535EXPORT_SYMBOL_GPL(inet_getpeer);
536
491/* Called with local BH disabled. */ 537/* Called with local BH disabled. */
492static void peer_check_expire(unsigned long dummy) 538static void peer_check_expire(unsigned long dummy)
493{ 539{
494 unsigned long now = jiffies; 540 unsigned long now = jiffies;
495 int ttl; 541 int ttl, total;
496 542
497 if (peers.total >= inet_peer_threshold) 543 total = compute_total();
544 if (total >= inet_peer_threshold)
498 ttl = inet_peer_minttl; 545 ttl = inet_peer_minttl;
499 else 546 else
500 ttl = inet_peer_maxttl 547 ttl = inet_peer_maxttl
501 - (inet_peer_maxttl - inet_peer_minttl) / HZ * 548 - (inet_peer_maxttl - inet_peer_minttl) / HZ *
502 peers.total / inet_peer_threshold * HZ; 549 total / inet_peer_threshold * HZ;
503 while (!cleanup_once(ttl)) { 550 while (!cleanup_once(ttl)) {
504 if (jiffies != now) 551 if (jiffies != now)
505 break; 552 break;
@@ -508,13 +555,14 @@ static void peer_check_expire(unsigned long dummy)
508 /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime 555 /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
509 * interval depending on the total number of entries (more entries, 556 * interval depending on the total number of entries (more entries,
510 * less interval). */ 557 * less interval). */
511 if (peers.total >= inet_peer_threshold) 558 total = compute_total();
559 if (total >= inet_peer_threshold)
512 peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime; 560 peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
513 else 561 else
514 peer_periodic_timer.expires = jiffies 562 peer_periodic_timer.expires = jiffies
515 + inet_peer_gc_maxtime 563 + inet_peer_gc_maxtime
516 - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ * 564 - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
517 peers.total / inet_peer_threshold * HZ; 565 total / inet_peer_threshold * HZ;
518 add_timer(&peer_periodic_timer); 566 add_timer(&peer_periodic_timer);
519} 567}
520 568
@@ -530,3 +578,4 @@ void inet_putpeer(struct inet_peer *p)
530 578
531 local_bh_enable(); 579 local_bh_enable();
532} 580}
581EXPORT_SYMBOL_GPL(inet_putpeer);
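
The inetpeer.c changes above split the single peers tree into per-family inet_peer_base instances and key each node on a struct inetpeer_addr, compared word by word in addr_compare(); the old (__force __u32)daddr < (__force __u32)u->v4daddr test becomes a -1/0/1 result that steers the AVL walk, so one code path can serve both IPv4 and IPv6 peers. A self-contained toy of that comparison is below; the key layout is guessed for illustration only (the real definition lives in include/net/inetpeer.h), and toy_addr_compare mirrors the addr_compare() shown in the hunk.

    #include <stdint.h>
    #include <stdio.h>

    struct toy_peer_addr {
        union {
            uint32_t a4;             /* IPv4 overlays the first word */
            uint32_t a6[4];
        };
        uint16_t family;             /* AF_INET or AF_INET6 in the kernel */
    };

    #define TOY_AF_INET  2
    #define TOY_AF_INET6 10

    /* -1/0/1 ordering used to decide whether to walk left or right. */
    static int toy_addr_compare(const struct toy_peer_addr *a,
                                const struct toy_peer_addr *b)
    {
        int i, n = (a->family == TOY_AF_INET ? 1 : 4);

        for (i = 0; i < n; i++) {
            if (a->a6[i] == b->a6[i])
                continue;
            return a->a6[i] < b->a6[i] ? -1 : 1;
        }
        return 0;
    }

    int main(void)
    {
        struct toy_peer_addr x = { .a4 = 0x0a000001, .family = TOY_AF_INET };
        struct toy_peer_addr y = { .a4 = 0x0a000002, .family = TOY_AF_INET };

        printf("compare(x, y) = %d\n", toy_addr_compare(&x, &y));
        printf("compare(x, x) = %d\n", toy_addr_compare(&x, &x));
        return 0;
    }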
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 168440834ad..e6215bdd96c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -141,7 +141,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, void *a)
141 qp->daddr = arg->iph->daddr; 141 qp->daddr = arg->iph->daddr;
142 qp->user = arg->user; 142 qp->user = arg->user;
143 qp->peer = sysctl_ipfrag_max_dist ? 143 qp->peer = sysctl_ipfrag_max_dist ?
144 inet_getpeer(arg->iph->saddr, 1) : NULL; 144 inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
145} 145}
146 146
147static __inline__ void ip4_frag_free(struct inet_frag_queue *q) 147static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
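
ip4_frag_init() now calls inet_getpeer_v4() rather than handing a bare __be32 to inet_getpeer(), whose signature in the inetpeer.c hunk above now takes a struct inetpeer_addr pointer. The v4 helper is presumably a thin wrapper that packs the address and family before deferring to the generic lookup; the sketch below shows that assumption with purely illustrative names (toy_getpeer_v4 and friends are not kernel symbols).

    #include <stdio.h>

    struct toy_peer_key {
        unsigned int a4;     /* IPv4 destination */
        int family;          /* 4 stands in for AF_INET here */
    };

    /* Stand-in for the generic inet_getpeer(): just echoes the key. */
    static void *toy_getpeer(const struct toy_peer_key *key, int create)
    {
        printf("lookup family=%d addr=%#x create=%d\n",
               key->family, key->a4, create);
        return NULL;
    }

    /* Assumed shape of the v4 convenience wrapper. */
    static void *toy_getpeer_v4(unsigned int daddr, int create)
    {
        struct toy_peer_key key = { .a4 = daddr, .family = 4 };

        return toy_getpeer(&key, create);
    }

    int main(void)
    {
        toy_getpeer_v4(0x0a000001, 1);
        return 0;
    }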
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 70ff77f02ee..258c98d5fa7 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -405,11 +405,11 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
405 if (parms->name[0]) 405 if (parms->name[0])
406 strlcpy(name, parms->name, IFNAMSIZ); 406 strlcpy(name, parms->name, IFNAMSIZ);
407 else 407 else
408 sprintf(name, "gre%%d"); 408 strcpy(name, "gre%d");
409 409
410 dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup); 410 dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
411 if (!dev) 411 if (!dev)
412 return NULL; 412 return NULL;
413 413
414 dev_net_set(dev, net); 414 dev_net_set(dev, net);
415 415
@@ -634,7 +634,7 @@ static int ipgre_rcv(struct sk_buff *skb)
634#ifdef CONFIG_NET_IPGRE_BROADCAST 634#ifdef CONFIG_NET_IPGRE_BROADCAST
635 if (ipv4_is_multicast(iph->daddr)) { 635 if (ipv4_is_multicast(iph->daddr)) {
636 /* Looped back packet, drop it! */ 636 /* Looped back packet, drop it! */
637 if (skb_rtable(skb)->fl.iif == 0) 637 if (rt_is_output_route(skb_rtable(skb)))
638 goto drop; 638 goto drop;
639 tunnel->dev->stats.multicast++; 639 tunnel->dev->stats.multicast++;
640 skb->pkt_type = PACKET_BROADCAST; 640 skb->pkt_type = PACKET_BROADCAST;
@@ -772,16 +772,11 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
772 { 772 {
773 struct flowi fl = { 773 struct flowi fl = {
774 .oif = tunnel->parms.link, 774 .oif = tunnel->parms.link,
775 .nl_u = { 775 .fl4_dst = dst,
776 .ip4_u = { 776 .fl4_src = tiph->saddr,
777 .daddr = dst, 777 .fl4_tos = RT_TOS(tos),
778 .saddr = tiph->saddr, 778 .fl_gre_key = tunnel->parms.o_key
779 .tos = RT_TOS(tos) 779 };
780 }
781 },
782 .proto = IPPROTO_GRE
783 }
784;
785 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 780 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
786 dev->stats.tx_carrier_errors++; 781 dev->stats.tx_carrier_errors++;
787 goto tx_error; 782 goto tx_error;
@@ -951,14 +946,11 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
951 if (iph->daddr) { 946 if (iph->daddr) {
952 struct flowi fl = { 947 struct flowi fl = {
953 .oif = tunnel->parms.link, 948 .oif = tunnel->parms.link,
954 .nl_u = { 949 .fl4_dst = iph->daddr,
955 .ip4_u = { 950 .fl4_src = iph->saddr,
956 .daddr = iph->daddr, 951 .fl4_tos = RT_TOS(iph->tos),
957 .saddr = iph->saddr, 952 .proto = IPPROTO_GRE,
958 .tos = RT_TOS(iph->tos) 953 .fl_gre_key = tunnel->parms.o_key
959 }
960 },
961 .proto = IPPROTO_GRE
962 }; 954 };
963 struct rtable *rt; 955 struct rtable *rt;
964 956
@@ -1216,14 +1208,11 @@ static int ipgre_open(struct net_device *dev)
1216 if (ipv4_is_multicast(t->parms.iph.daddr)) { 1208 if (ipv4_is_multicast(t->parms.iph.daddr)) {
1217 struct flowi fl = { 1209 struct flowi fl = {
1218 .oif = t->parms.link, 1210 .oif = t->parms.link,
1219 .nl_u = { 1211 .fl4_dst = t->parms.iph.daddr,
1220 .ip4_u = { 1212 .fl4_src = t->parms.iph.saddr,
1221 .daddr = t->parms.iph.daddr, 1213 .fl4_tos = RT_TOS(t->parms.iph.tos),
1222 .saddr = t->parms.iph.saddr, 1214 .proto = IPPROTO_GRE,
1223 .tos = RT_TOS(t->parms.iph.tos) 1215 .fl_gre_key = t->parms.o_key
1224 }
1225 },
1226 .proto = IPPROTO_GRE
1227 }; 1216 };
1228 struct rtable *rt; 1217 struct rtable *rt;
1229 1218
@@ -1775,3 +1764,4 @@ module_exit(ipgre_fini);
1775MODULE_LICENSE("GPL"); 1764MODULE_LICENSE("GPL");
1776MODULE_ALIAS_RTNL_LINK("gre"); 1765MODULE_ALIAS_RTNL_LINK("gre");
1777MODULE_ALIAS_RTNL_LINK("gretap"); 1766MODULE_ALIAS_RTNL_LINK("gretap");
1767MODULE_ALIAS("gre0");
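
Besides the flowi, fl_gre_key and rt_is_output_route() conversions, the ip_gre.c hunk swaps sprintf(name, "gre%%d") for strcpy(name, "gre%d"): the doubled percent only existed so sprintf would emit a literal "gre%d" for alloc_netdev() to expand later, so a plain copy yields the same bytes without a format pass. A tiny standalone check of that equivalence (the 16-byte buffers simply stand in for IFNAMSIZ-sized names):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char via_sprintf[16];
        char via_strcpy[16];

        sprintf(via_sprintf, "gre%%d");   /* %% -> literal '%' */
        strcpy(via_strcpy, "gre%d");

        printf("sprintf: \"%s\"\nstrcpy : \"%s\"\nequal  : %d\n",
               via_sprintf, via_strcpy,
               strcmp(via_sprintf, via_strcpy) == 0);
        return 0;
    }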
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 439d2a34ee4..5090c7ff525 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -341,15 +341,13 @@ int ip_queue_xmit(struct sk_buff *skb)
341 { 341 {
342 struct flowi fl = { .oif = sk->sk_bound_dev_if, 342 struct flowi fl = { .oif = sk->sk_bound_dev_if,
343 .mark = sk->sk_mark, 343 .mark = sk->sk_mark,
344 .nl_u = { .ip4_u = 344 .fl4_dst = daddr,
345 { .daddr = daddr, 345 .fl4_src = inet->inet_saddr,
346 .saddr = inet->inet_saddr, 346 .fl4_tos = RT_CONN_FLAGS(sk),
347 .tos = RT_CONN_FLAGS(sk) } },
348 .proto = sk->sk_protocol, 347 .proto = sk->sk_protocol,
349 .flags = inet_sk_flowi_flags(sk), 348 .flags = inet_sk_flowi_flags(sk),
350 .uli_u = { .ports = 349 .fl_ip_sport = inet->inet_sport,
351 { .sport = inet->inet_sport, 350 .fl_ip_dport = inet->inet_dport };
352 .dport = inet->inet_dport } } };
353 351
354 /* If this fails, retransmit mechanism of transport layer will 352 /* If this fails, retransmit mechanism of transport layer will
355 * keep trying until route appears or the connection times 353 * keep trying until route appears or the connection times
@@ -1404,14 +1402,11 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1404 1402
1405 { 1403 {
1406 struct flowi fl = { .oif = arg->bound_dev_if, 1404 struct flowi fl = { .oif = arg->bound_dev_if,
1407 .nl_u = { .ip4_u = 1405 .fl4_dst = daddr,
1408 { .daddr = daddr, 1406 .fl4_src = rt->rt_spec_dst,
1409 .saddr = rt->rt_spec_dst, 1407 .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
1410 .tos = RT_TOS(ip_hdr(skb)->tos) } }, 1408 .fl_ip_sport = tcp_hdr(skb)->dest,
1411 /* Not quite clean, but right. */ 1409 .fl_ip_dport = tcp_hdr(skb)->source,
1412 .uli_u = { .ports =
1413 { .sport = tcp_hdr(skb)->dest,
1414 .dport = tcp_hdr(skb)->source } },
1415 .proto = sk->sk_protocol, 1410 .proto = sk->sk_protocol,
1416 .flags = ip_reply_arg_flowi_flags(arg) }; 1411 .flags = ip_reply_arg_flowi_flags(arg) };
1417 security_skb_classify_flow(skb, &fl); 1412 security_skb_classify_flow(skb, &fl);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 3a6e1ec5e9a..2b097752426 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1191,13 +1191,13 @@ static int __init ic_dynamic(void)
1191 (ic_proto_enabled & IC_USE_DHCP) && 1191 (ic_proto_enabled & IC_USE_DHCP) &&
1192 ic_dhcp_msgtype != DHCPACK) { 1192 ic_dhcp_msgtype != DHCPACK) {
1193 ic_got_reply = 0; 1193 ic_got_reply = 0;
1194 printk(","); 1194 printk(KERN_CONT ",");
1195 continue; 1195 continue;
1196 } 1196 }
1197#endif /* IPCONFIG_DHCP */ 1197#endif /* IPCONFIG_DHCP */
1198 1198
1199 if (ic_got_reply) { 1199 if (ic_got_reply) {
1200 printk(" OK\n"); 1200 printk(KERN_CONT " OK\n");
1201 break; 1201 break;
1202 } 1202 }
1203 1203
@@ -1205,7 +1205,7 @@ static int __init ic_dynamic(void)
1205 continue; 1205 continue;
1206 1206
1207 if (! --retries) { 1207 if (! --retries) {
1208 printk(" timed out!\n"); 1208 printk(KERN_CONT " timed out!\n");
1209 break; 1209 break;
1210 } 1210 }
1211 1211
@@ -1215,7 +1215,7 @@ static int __init ic_dynamic(void)
1215 if (timeout > CONF_TIMEOUT_MAX) 1215 if (timeout > CONF_TIMEOUT_MAX)
1216 timeout = CONF_TIMEOUT_MAX; 1216 timeout = CONF_TIMEOUT_MAX;
1217 1217
1218 printk("."); 1218 printk(KERN_CONT ".");
1219 } 1219 }
1220 1220
1221#ifdef IPCONFIG_BOOTP 1221#ifdef IPCONFIG_BOOTP
@@ -1236,7 +1236,7 @@ static int __init ic_dynamic(void)
1236 ((ic_got_reply & IC_RARP) ? "RARP" 1236 ((ic_got_reply & IC_RARP) ? "RARP"
1237 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), 1237 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
1238 &ic_servaddr); 1238 &ic_servaddr);
1239 printk("my address is %pI4\n", &ic_myaddr); 1239 printk(KERN_CONT "my address is %pI4\n", &ic_myaddr);
1240 1240
1241 return 0; 1241 return 0;
1242} 1242}
@@ -1468,19 +1468,19 @@ static int __init ip_auto_config(void)
1468 /* 1468 /*
1469 * Clue in the operator. 1469 * Clue in the operator.
1470 */ 1470 */
1471 printk("IP-Config: Complete:"); 1471 printk("IP-Config: Complete:\n");
1472 printk("\n device=%s", ic_dev->name); 1472 printk(" device=%s", ic_dev->name);
1473 printk(", addr=%pI4", &ic_myaddr); 1473 printk(KERN_CONT ", addr=%pI4", &ic_myaddr);
1474 printk(", mask=%pI4", &ic_netmask); 1474 printk(KERN_CONT ", mask=%pI4", &ic_netmask);
1475 printk(", gw=%pI4", &ic_gateway); 1475 printk(KERN_CONT ", gw=%pI4", &ic_gateway);
1476 printk(",\n host=%s, domain=%s, nis-domain=%s", 1476 printk(KERN_CONT ",\n host=%s, domain=%s, nis-domain=%s",
1477 utsname()->nodename, ic_domain, utsname()->domainname); 1477 utsname()->nodename, ic_domain, utsname()->domainname);
1478 printk(",\n bootserver=%pI4", &ic_servaddr); 1478 printk(KERN_CONT ",\n bootserver=%pI4", &ic_servaddr);
1479 printk(", rootserver=%pI4", &root_server_addr); 1479 printk(KERN_CONT ", rootserver=%pI4", &root_server_addr);
1480 printk(", rootpath=%s", root_server_path); 1480 printk(KERN_CONT ", rootpath=%s", root_server_path);
1481 if (ic_dev_mtu) 1481 if (ic_dev_mtu)
1482 printk(", mtu=%d", ic_dev_mtu); 1482 printk(KERN_CONT ", mtu=%d", ic_dev_mtu);
1483 printk("\n"); 1483 printk(KERN_CONT "\n");
1484#endif /* !SILENT */ 1484#endif /* !SILENT */
1485 1485
1486 return 0; 1486 return 0;
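
The ipconfig.c hunks tag every continuation printk() with KERN_CONT so the progress dots and the multi-part address report stay on one logical line instead of each fragment opening a new default-level message. The toy logger below approximates the mechanism in userspace; TOY_CONT is an arbitrary marker standing in for KERN_CONT, and the prefix handling is reduced to a single fputs().

    #include <stdarg.h>
    #include <stdio.h>
    #include <string.h>

    #define TOY_CONT "\001c"   /* illustrative marker, not the kernel value */

    static void toy_printk(const char *fmt, ...)
    {
        va_list ap;

        if (strncmp(fmt, TOY_CONT, strlen(TOY_CONT)) == 0)
            fmt += strlen(TOY_CONT);     /* continuation: keep the line */
        else
            fputs("<info> ", stdout);    /* new record: emit a prefix */

        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        toy_printk("my address is %s", "10.0.0.1");       /* starts a record */
        toy_printk(TOY_CONT ", mask=%s\n", "255.0.0.0");  /* same line */
        toy_printk("next message\n");                     /* new record */
        return 0;
    }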
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index cd300aaee78..988f52fba54 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -463,13 +463,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
463 { 463 {
464 struct flowi fl = { 464 struct flowi fl = {
465 .oif = tunnel->parms.link, 465 .oif = tunnel->parms.link,
466 .nl_u = { 466 .fl4_dst = dst,
467 .ip4_u = { 467 .fl4_src= tiph->saddr,
468 .daddr = dst, 468 .fl4_tos = RT_TOS(tos),
469 .saddr = tiph->saddr,
470 .tos = RT_TOS(tos)
471 }
472 },
473 .proto = IPPROTO_IPIP 469 .proto = IPPROTO_IPIP
474 }; 470 };
475 471
@@ -589,13 +585,9 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
589 if (iph->daddr) { 585 if (iph->daddr) {
590 struct flowi fl = { 586 struct flowi fl = {
591 .oif = tunnel->parms.link, 587 .oif = tunnel->parms.link,
592 .nl_u = { 588 .fl4_dst = iph->daddr,
593 .ip4_u = { 589 .fl4_src = iph->saddr,
594 .daddr = iph->daddr, 590 .fl4_tos = RT_TOS(iph->tos),
595 .saddr = iph->saddr,
596 .tos = RT_TOS(iph->tos)
597 }
598 },
599 .proto = IPPROTO_IPIP 591 .proto = IPPROTO_IPIP
600 }; 592 };
601 struct rtable *rt; 593 struct rtable *rt;
@@ -921,3 +913,4 @@ static void __exit ipip_fini(void)
921module_init(ipip_init); 913module_init(ipip_init);
922module_exit(ipip_fini); 914module_exit(ipip_fini);
923MODULE_LICENSE("GPL"); 915MODULE_LICENSE("GPL");
916MODULE_ALIAS("tunl0");
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 86dd5691af4..3f3a9afd73e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1537,13 +1537,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1537 if (vif->flags & VIFF_TUNNEL) { 1537 if (vif->flags & VIFF_TUNNEL) {
1538 struct flowi fl = { 1538 struct flowi fl = {
1539 .oif = vif->link, 1539 .oif = vif->link,
1540 .nl_u = { 1540 .fl4_dst = vif->remote,
1541 .ip4_u = { 1541 .fl4_src = vif->local,
1542 .daddr = vif->remote, 1542 .fl4_tos = RT_TOS(iph->tos),
1543 .saddr = vif->local,
1544 .tos = RT_TOS(iph->tos)
1545 }
1546 },
1547 .proto = IPPROTO_IPIP 1543 .proto = IPPROTO_IPIP
1548 }; 1544 };
1549 1545
@@ -1553,12 +1549,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1553 } else { 1549 } else {
1554 struct flowi fl = { 1550 struct flowi fl = {
1555 .oif = vif->link, 1551 .oif = vif->link,
1556 .nl_u = { 1552 .fl4_dst = iph->daddr,
1557 .ip4_u = { 1553 .fl4_tos = RT_TOS(iph->tos),
1558 .daddr = iph->daddr,
1559 .tos = RT_TOS(iph->tos)
1560 }
1561 },
1562 .proto = IPPROTO_IPIP 1554 .proto = IPPROTO_IPIP
1563 }; 1555 };
1564 1556
@@ -1654,7 +1646,7 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
1654 if (mrt->vif_table[vif].dev != skb->dev) { 1646 if (mrt->vif_table[vif].dev != skb->dev) {
1655 int true_vifi; 1647 int true_vifi;
1656 1648
1657 if (skb_rtable(skb)->fl.iif == 0) { 1649 if (rt_is_output_route(skb_rtable(skb))) {
1658 /* It is our own packet, looped back. 1650 /* It is our own packet, looped back.
1659 * Very complicated situation... 1651 * Very complicated situation...
1660 * 1652 *
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index d88a46c54fd..994a1f29ebb 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -31,10 +31,10 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
31 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook. 31 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
32 */ 32 */
33 if (addr_type == RTN_LOCAL) { 33 if (addr_type == RTN_LOCAL) {
34 fl.nl_u.ip4_u.daddr = iph->daddr; 34 fl.fl4_dst = iph->daddr;
35 if (type == RTN_LOCAL) 35 if (type == RTN_LOCAL)
36 fl.nl_u.ip4_u.saddr = iph->saddr; 36 fl.fl4_src = iph->saddr;
37 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); 37 fl.fl4_tos = RT_TOS(iph->tos);
38 fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; 38 fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
39 fl.mark = skb->mark; 39 fl.mark = skb->mark;
40 fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; 40 fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
@@ -47,7 +47,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
47 } else { 47 } else {
48 /* non-local src, find valid iif to satisfy 48 /* non-local src, find valid iif to satisfy
49 * rp-filter when calling ip_route_input. */ 49 * rp-filter when calling ip_route_input. */
50 fl.nl_u.ip4_u.daddr = iph->saddr; 50 fl.fl4_dst = iph->saddr;
51 if (ip_route_output_key(net, &rt, &fl) != 0) 51 if (ip_route_output_key(net, &rt, &fl) != 0)
52 return -1; 52 return -1;
53 53
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 48111594ee9..19eb59d0103 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -3,15 +3,15 @@
3# 3#
4 4
5# objects for l3 independent conntrack 5# objects for l3 independent conntrack
6nf_conntrack_ipv4-objs := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o 6nf_conntrack_ipv4-y := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
7ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y) 7ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y)
8ifeq ($(CONFIG_PROC_FS),y) 8ifeq ($(CONFIG_PROC_FS),y)
9nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o 9nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o
10endif 10endif
11endif 11endif
12 12
13nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o 13nf_nat-y := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
14iptable_nat-objs := nf_nat_rule.o nf_nat_standalone.o 14iptable_nat-y := nf_nat_rule.o nf_nat_standalone.o
15 15
16# connection tracking 16# connection tracking
17obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o 17obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 1f85ef28989..a3d5ab786e8 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -549,10 +549,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
549 { 549 {
550 struct flowi fl = { .oif = ipc.oif, 550 struct flowi fl = { .oif = ipc.oif,
551 .mark = sk->sk_mark, 551 .mark = sk->sk_mark,
552 .nl_u = { .ip4_u = 552 .fl4_dst = daddr,
553 { .daddr = daddr, 553 .fl4_src = saddr,
554 .saddr = saddr, 554 .fl4_tos = tos,
555 .tos = tos } },
556 .proto = inet->hdrincl ? IPPROTO_RAW : 555 .proto = inet->hdrincl ? IPPROTO_RAW :
557 sk->sk_protocol, 556 sk->sk_protocol,
558 }; 557 };
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 987bf9adb31..3843c2dfde8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -140,13 +140,15 @@ static unsigned long expires_ljiffies;
140 140
141static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); 141static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
142static void ipv4_dst_destroy(struct dst_entry *dst); 142static void ipv4_dst_destroy(struct dst_entry *dst);
143static void ipv4_dst_ifdown(struct dst_entry *dst,
144 struct net_device *dev, int how);
145static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); 143static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
146static void ipv4_link_failure(struct sk_buff *skb); 144static void ipv4_link_failure(struct sk_buff *skb);
147static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu); 145static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
148static int rt_garbage_collect(struct dst_ops *ops); 146static int rt_garbage_collect(struct dst_ops *ops);
149 147
148static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
149 int how)
150{
151}
150 152
151static struct dst_ops ipv4_dst_ops = { 153static struct dst_ops ipv4_dst_ops = {
152 .family = AF_INET, 154 .family = AF_INET,
@@ -621,7 +623,7 @@ static inline int rt_fast_clean(struct rtable *rth)
621 /* Kill broadcast/multicast entries very aggresively, if they 623 /* Kill broadcast/multicast entries very aggresively, if they
622 collide in hash table with more useful entries */ 624 collide in hash table with more useful entries */
623 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) && 625 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
624 rth->fl.iif && rth->dst.rt_next; 626 rt_is_input_route(rth) && rth->dst.rt_next;
625} 627}
626 628
627static inline int rt_valuable(struct rtable *rth) 629static inline int rt_valuable(struct rtable *rth)
@@ -666,7 +668,7 @@ static inline u32 rt_score(struct rtable *rt)
666 if (rt_valuable(rt)) 668 if (rt_valuable(rt))
667 score |= (1<<31); 669 score |= (1<<31);
668 670
669 if (!rt->fl.iif || 671 if (rt_is_output_route(rt) ||
670 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL))) 672 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
671 score |= (1<<30); 673 score |= (1<<30);
672 674
@@ -682,17 +684,17 @@ static inline bool rt_caching(const struct net *net)
682static inline bool compare_hash_inputs(const struct flowi *fl1, 684static inline bool compare_hash_inputs(const struct flowi *fl1,
683 const struct flowi *fl2) 685 const struct flowi *fl2)
684{ 686{
685 return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) | 687 return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
686 ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) | 688 ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
687 (fl1->iif ^ fl2->iif)) == 0); 689 (fl1->iif ^ fl2->iif)) == 0);
688} 690}
689 691
690static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 692static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
691{ 693{
692 return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) | 694 return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
693 ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) | 695 ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
694 (fl1->mark ^ fl2->mark) | 696 (fl1->mark ^ fl2->mark) |
695 (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) | 697 (*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
696 (fl1->oif ^ fl2->oif) | 698 (fl1->oif ^ fl2->oif) |
697 (fl1->iif ^ fl2->iif)) == 0; 699 (fl1->iif ^ fl2->iif)) == 0;
698} 700}
@@ -1124,7 +1126,7 @@ restart:
1124 */ 1126 */
1125 1127
1126 rt->dst.flags |= DST_NOCACHE; 1128 rt->dst.flags |= DST_NOCACHE;
1127 if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) { 1129 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1128 int err = arp_bind_neighbour(&rt->dst); 1130 int err = arp_bind_neighbour(&rt->dst);
1129 if (err) { 1131 if (err) {
1130 if (net_ratelimit()) 1132 if (net_ratelimit())
@@ -1222,7 +1224,7 @@ restart:
1222 /* Try to bind route to arp only if it is output 1224 /* Try to bind route to arp only if it is output
1223 route or unicast forwarding path. 1225 route or unicast forwarding path.
1224 */ 1226 */
1225 if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) { 1227 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1226 int err = arp_bind_neighbour(&rt->dst); 1228 int err = arp_bind_neighbour(&rt->dst);
1227 if (err) { 1229 if (err) {
1228 spin_unlock_bh(rt_hash_lock_addr(hash)); 1230 spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1287,7 +1289,7 @@ void rt_bind_peer(struct rtable *rt, int create)
1287{ 1289{
1288 struct inet_peer *peer; 1290 struct inet_peer *peer;
1289 1291
1290 peer = inet_getpeer(rt->rt_dst, create); 1292 peer = inet_getpeer_v4(rt->rt_dst, create);
1291 1293
1292 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL) 1294 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1293 inet_putpeer(peer); 1295 inet_putpeer(peer);
@@ -1404,7 +1406,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1404 if (rth->fl.fl4_dst != daddr || 1406 if (rth->fl.fl4_dst != daddr ||
1405 rth->fl.fl4_src != skeys[i] || 1407 rth->fl.fl4_src != skeys[i] ||
1406 rth->fl.oif != ikeys[k] || 1408 rth->fl.oif != ikeys[k] ||
1407 rth->fl.iif != 0 || 1409 rt_is_input_route(rth) ||
1408 rt_is_expired(rth) || 1410 rt_is_expired(rth) ||
1409 !net_eq(dev_net(rth->dst.dev), net)) { 1411 !net_eq(dev_net(rth->dst.dev), net)) {
1410 rthp = &rth->dst.rt_next; 1412 rthp = &rth->dst.rt_next;
@@ -1433,8 +1435,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1433 rt->dst.child = NULL; 1435 rt->dst.child = NULL;
1434 if (rt->dst.dev) 1436 if (rt->dst.dev)
1435 dev_hold(rt->dst.dev); 1437 dev_hold(rt->dst.dev);
1436 if (rt->idev)
1437 in_dev_hold(rt->idev);
1438 rt->dst.obsolete = -1; 1438 rt->dst.obsolete = -1;
1439 rt->dst.lastuse = jiffies; 1439 rt->dst.lastuse = jiffies;
1440 rt->dst.path = &rt->dst; 1440 rt->dst.path = &rt->dst;
@@ -1666,7 +1666,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1666 rth->rt_dst != daddr || 1666 rth->rt_dst != daddr ||
1667 rth->rt_src != iph->saddr || 1667 rth->rt_src != iph->saddr ||
1668 rth->fl.oif != ikeys[k] || 1668 rth->fl.oif != ikeys[k] ||
1669 rth->fl.iif != 0 || 1669 rt_is_input_route(rth) ||
1670 dst_metric_locked(&rth->dst, RTAX_MTU) || 1670 dst_metric_locked(&rth->dst, RTAX_MTU) ||
1671 !net_eq(dev_net(rth->dst.dev), net) || 1671 !net_eq(dev_net(rth->dst.dev), net) ||
1672 rt_is_expired(rth)) 1672 rt_is_expired(rth))
@@ -1728,33 +1728,13 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
1728{ 1728{
1729 struct rtable *rt = (struct rtable *) dst; 1729 struct rtable *rt = (struct rtable *) dst;
1730 struct inet_peer *peer = rt->peer; 1730 struct inet_peer *peer = rt->peer;
1731 struct in_device *idev = rt->idev;
1732 1731
1733 if (peer) { 1732 if (peer) {
1734 rt->peer = NULL; 1733 rt->peer = NULL;
1735 inet_putpeer(peer); 1734 inet_putpeer(peer);
1736 } 1735 }
1737
1738 if (idev) {
1739 rt->idev = NULL;
1740 in_dev_put(idev);
1741 }
1742} 1736}
1743 1737
1744static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
1745 int how)
1746{
1747 struct rtable *rt = (struct rtable *) dst;
1748 struct in_device *idev = rt->idev;
1749 if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
1750 struct in_device *loopback_idev =
1751 in_dev_get(dev_net(dev)->loopback_dev);
1752 if (loopback_idev) {
1753 rt->idev = loopback_idev;
1754 in_dev_put(idev);
1755 }
1756 }
1757}
1758 1738
1759static void ipv4_link_failure(struct sk_buff *skb) 1739static void ipv4_link_failure(struct sk_buff *skb)
1760{ 1740{
@@ -1790,7 +1770,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
1790 __be32 src; 1770 __be32 src;
1791 struct fib_result res; 1771 struct fib_result res;
1792 1772
1793 if (rt->fl.iif == 0) 1773 if (rt_is_output_route(rt))
1794 src = rt->rt_src; 1774 src = rt->rt_src;
1795 else { 1775 else {
1796 rcu_read_lock(); 1776 rcu_read_lock();
@@ -1910,7 +1890,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1910 rth->fl.iif = dev->ifindex; 1890 rth->fl.iif = dev->ifindex;
1911 rth->dst.dev = init_net.loopback_dev; 1891 rth->dst.dev = init_net.loopback_dev;
1912 dev_hold(rth->dst.dev); 1892 dev_hold(rth->dst.dev);
1913 rth->idev = in_dev_get(rth->dst.dev);
1914 rth->fl.oif = 0; 1893 rth->fl.oif = 0;
1915 rth->rt_gateway = daddr; 1894 rth->rt_gateway = daddr;
1916 rth->rt_spec_dst= spec_dst; 1895 rth->rt_spec_dst= spec_dst;
@@ -2050,7 +2029,6 @@ static int __mkroute_input(struct sk_buff *skb,
2050 rth->fl.iif = in_dev->dev->ifindex; 2029 rth->fl.iif = in_dev->dev->ifindex;
2051 rth->dst.dev = (out_dev)->dev; 2030 rth->dst.dev = (out_dev)->dev;
2052 dev_hold(rth->dst.dev); 2031 dev_hold(rth->dst.dev);
2053 rth->idev = in_dev_get(rth->dst.dev);
2054 rth->fl.oif = 0; 2032 rth->fl.oif = 0;
2055 rth->rt_spec_dst= spec_dst; 2033 rth->rt_spec_dst= spec_dst;
2056 2034
@@ -2111,12 +2089,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2111{ 2089{
2112 struct fib_result res; 2090 struct fib_result res;
2113 struct in_device *in_dev = __in_dev_get_rcu(dev); 2091 struct in_device *in_dev = __in_dev_get_rcu(dev);
2114 struct flowi fl = { .nl_u = { .ip4_u = 2092 struct flowi fl = { .fl4_dst = daddr,
2115 { .daddr = daddr, 2093 .fl4_src = saddr,
2116 .saddr = saddr, 2094 .fl4_tos = tos,
2117 .tos = tos, 2095 .fl4_scope = RT_SCOPE_UNIVERSE,
2118 .scope = RT_SCOPE_UNIVERSE,
2119 } },
2120 .mark = skb->mark, 2096 .mark = skb->mark,
2121 .iif = dev->ifindex }; 2097 .iif = dev->ifindex };
2122 unsigned flags = 0; 2098 unsigned flags = 0;
@@ -2231,7 +2207,6 @@ local_input:
2231 rth->fl.iif = dev->ifindex; 2207 rth->fl.iif = dev->ifindex;
2232 rth->dst.dev = net->loopback_dev; 2208 rth->dst.dev = net->loopback_dev;
2233 dev_hold(rth->dst.dev); 2209 dev_hold(rth->dst.dev);
2234 rth->idev = in_dev_get(rth->dst.dev);
2235 rth->rt_gateway = daddr; 2210 rth->rt_gateway = daddr;
2236 rth->rt_spec_dst= spec_dst; 2211 rth->rt_spec_dst= spec_dst;
2237 rth->dst.input= ip_local_deliver; 2212 rth->dst.input= ip_local_deliver;
@@ -2417,9 +2392,6 @@ static int __mkroute_output(struct rtable **result,
2417 if (!rth) 2392 if (!rth)
2418 return -ENOBUFS; 2393 return -ENOBUFS;
2419 2394
2420 in_dev_hold(in_dev);
2421 rth->idev = in_dev;
2422
2423 atomic_set(&rth->dst.__refcnt, 1); 2395 atomic_set(&rth->dst.__refcnt, 1);
2424 rth->dst.flags= DST_HOST; 2396 rth->dst.flags= DST_HOST;
2425 if (IN_DEV_CONF_GET(in_dev, NOXFRM)) 2397 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
@@ -2506,14 +2478,11 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
2506 const struct flowi *oldflp) 2478 const struct flowi *oldflp)
2507{ 2479{
2508 u32 tos = RT_FL_TOS(oldflp); 2480 u32 tos = RT_FL_TOS(oldflp);
2509 struct flowi fl = { .nl_u = { .ip4_u = 2481 struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
2510 { .daddr = oldflp->fl4_dst, 2482 .fl4_src = oldflp->fl4_src,
2511 .saddr = oldflp->fl4_src, 2483 .fl4_tos = tos & IPTOS_RT_MASK,
2512 .tos = tos & IPTOS_RT_MASK, 2484 .fl4_scope = ((tos & RTO_ONLINK) ?
2513 .scope = ((tos & RTO_ONLINK) ? 2485 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
2514 RT_SCOPE_LINK :
2515 RT_SCOPE_UNIVERSE),
2516 } },
2517 .mark = oldflp->mark, 2486 .mark = oldflp->mark,
2518 .iif = net->loopback_dev->ifindex, 2487 .iif = net->loopback_dev->ifindex,
2519 .oif = oldflp->oif }; 2488 .oif = oldflp->oif };
@@ -2695,7 +2664,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2695 rth = rcu_dereference_bh(rth->dst.rt_next)) { 2664 rth = rcu_dereference_bh(rth->dst.rt_next)) {
2696 if (rth->fl.fl4_dst == flp->fl4_dst && 2665 if (rth->fl.fl4_dst == flp->fl4_dst &&
2697 rth->fl.fl4_src == flp->fl4_src && 2666 rth->fl.fl4_src == flp->fl4_src &&
2698 rth->fl.iif == 0 && 2667 rt_is_output_route(rth) &&
2699 rth->fl.oif == flp->oif && 2668 rth->fl.oif == flp->oif &&
2700 rth->fl.mark == flp->mark && 2669 rth->fl.mark == flp->mark &&
2701 !((rth->fl.fl4_tos ^ flp->fl4_tos) & 2670 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
@@ -2759,9 +2728,6 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
2759 2728
2760 rt->fl = ort->fl; 2729 rt->fl = ort->fl;
2761 2730
2762 rt->idev = ort->idev;
2763 if (rt->idev)
2764 in_dev_hold(rt->idev);
2765 rt->rt_genid = rt_genid(net); 2731 rt->rt_genid = rt_genid(net);
2766 rt->rt_flags = ort->rt_flags; 2732 rt->rt_flags = ort->rt_flags;
2767 rt->rt_type = ort->rt_type; 2733 rt->rt_type = ort->rt_type;
@@ -2853,7 +2819,7 @@ static int rt_fill_info(struct net *net,
2853 if (rt->dst.tclassid) 2819 if (rt->dst.tclassid)
2854 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid); 2820 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
2855#endif 2821#endif
2856 if (rt->fl.iif) 2822 if (rt_is_input_route(rt))
2857 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); 2823 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2858 else if (rt->rt_src != rt->fl.fl4_src) 2824 else if (rt->rt_src != rt->fl.fl4_src)
2859 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src); 2825 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
@@ -2878,7 +2844,7 @@ static int rt_fill_info(struct net *net,
2878 } 2844 }
2879 } 2845 }
2880 2846
2881 if (rt->fl.iif) { 2847 if (rt_is_input_route(rt)) {
2882#ifdef CONFIG_IP_MROUTE 2848#ifdef CONFIG_IP_MROUTE
2883 __be32 dst = rt->rt_dst; 2849 __be32 dst = rt->rt_dst;
2884 2850
@@ -2973,13 +2939,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2973 err = -rt->dst.error; 2939 err = -rt->dst.error;
2974 } else { 2940 } else {
2975 struct flowi fl = { 2941 struct flowi fl = {
2976 .nl_u = { 2942 .fl4_dst = dst,
2977 .ip4_u = { 2943 .fl4_src = src,
2978 .daddr = dst, 2944 .fl4_tos = rtm->rtm_tos,
2979 .saddr = src,
2980 .tos = rtm->rtm_tos,
2981 },
2982 },
2983 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0, 2945 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2984 .mark = mark, 2946 .mark = mark,
2985 }; 2947 };
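
Throughout route.c (and in the ip_gre.c and ipmr.c hunks above), open-coded tests of rt->fl.iif give way to rt_is_output_route() and rt_is_input_route(); from the substitutions in this diff the helpers behave as iif == 0 and iif != 0 respectively, keeping the input-versus-output question in one place. The same file also drops the rtable->idev reference counting, leaving ipv4_dst_ifdown() as a stub. A minimal model of the two predicates over a cut-down rtable (the real inlines live in include/net/route.h):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_flowi  { int iif; int oif; };
    struct toy_rtable { struct toy_flowi fl; };

    /* Output routes are built without an input interface, so iif == 0. */
    static bool rt_is_output_route(const struct toy_rtable *rt)
    {
        return rt->fl.iif == 0;
    }

    static bool rt_is_input_route(const struct toy_rtable *rt)
    {
        return rt->fl.iif != 0;
    }

    int main(void)
    {
        struct toy_rtable out = { .fl = { .iif = 0, .oif = 2 } };
        struct toy_rtable in  = { .fl = { .iif = 3, .oif = 0 } };

        printf("out: output=%d input=%d\n",
               rt_is_output_route(&out), rt_is_input_route(&out));
        printf("in : output=%d input=%d\n",
               rt_is_output_route(&in), rt_is_input_route(&in));
        return 0;
    }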
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 650cace2180..47519205a01 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -346,17 +346,14 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
346 */ 346 */
347 { 347 {
348 struct flowi fl = { .mark = sk->sk_mark, 348 struct flowi fl = { .mark = sk->sk_mark,
349 .nl_u = { .ip4_u = 349 .fl4_dst = ((opt && opt->srr) ?
350 { .daddr = ((opt && opt->srr) ? 350 opt->faddr : ireq->rmt_addr),
351 opt->faddr : 351 .fl4_src = ireq->loc_addr,
352 ireq->rmt_addr), 352 .fl4_tos = RT_CONN_FLAGS(sk),
353 .saddr = ireq->loc_addr,
354 .tos = RT_CONN_FLAGS(sk) } },
355 .proto = IPPROTO_TCP, 353 .proto = IPPROTO_TCP,
356 .flags = inet_sk_flowi_flags(sk), 354 .flags = inet_sk_flowi_flags(sk),
357 .uli_u = { .ports = 355 .fl_ip_sport = th->dest,
358 { .sport = th->dest, 356 .fl_ip_dport = th->source };
359 .dport = th->source } } };
360 security_req_classify_flow(req, &fl); 357 security_req_classify_flow(req, &fl);
361 if (ip_route_output_key(sock_net(sk), &rt, &fl)) { 358 if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
362 reqsk_free(req); 359 reqsk_free(req);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f15c36a706e..6c11eece262 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1193,7 +1193,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
1193 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1193 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1194 1194
1195 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1195 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1196 KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1196 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1197 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1197 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1198#endif 1198#endif
1199 1199
@@ -1477,10 +1477,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1477 * shouldn't happen. 1477 * shouldn't happen.
1478 */ 1478 */
1479 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 1479 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1480 KERN_INFO "recvmsg bug: copied %X " 1480 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1481 "seq %X rcvnxt %X fl %X\n", *seq, 1481 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1482 TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 1482 flags))
1483 flags))
1484 break; 1483 break;
1485 1484
1486 offset = *seq - TCP_SKB_CB(skb)->seq; 1485 offset = *seq - TCP_SKB_CB(skb)->seq;
@@ -1490,10 +1489,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1490 goto found_ok_skb; 1489 goto found_ok_skb;
1491 if (tcp_hdr(skb)->fin) 1490 if (tcp_hdr(skb)->fin)
1492 goto found_fin_ok; 1491 goto found_fin_ok;
1493 WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: " 1492 WARN(!(flags & MSG_PEEK),
1494 "copied %X seq %X rcvnxt %X fl %X\n", 1493 "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1495 *seq, TCP_SKB_CB(skb)->seq, 1494 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1496 tp->rcv_nxt, flags);
1497 } 1495 }
1498 1496
1499 /* Well, if we have backlog, try to process it now yet. */ 1497 /* Well, if we have backlog, try to process it now yet. */
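
The tcp.c hunks drop the KERN_INFO prefix from the WARN() format strings (the WARN machinery handles logging of the message itself, so an embedded level marker is unnecessary there) and fold the wrapped string literals onto single lines. The folding is purely cosmetic, since adjacent literals concatenate to the same bytes, as the small check below confirms:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *wrapped  = "recvmsg bug: copied %X "
                               "seq %X rcvnxt %X fl %X\n";
        const char *one_line = "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n";

        printf("identical: %d\n", strcmp(wrapped, one_line) == 0);
        return 0;
    }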
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e13da6de1fc..4fc3387aa99 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1210,12 +1210,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1210}; 1210};
1211#endif 1211#endif
1212 1212
1213static struct timewait_sock_ops tcp_timewait_sock_ops = {
1214 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1215 .twsk_unique = tcp_twsk_unique,
1216 .twsk_destructor= tcp_twsk_destructor,
1217};
1218
1219int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 1213int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1220{ 1214{
1221 struct tcp_extend_values tmp_ext; 1215 struct tcp_extend_values tmp_ext;
@@ -1347,7 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1347 tcp_death_row.sysctl_tw_recycle && 1341 tcp_death_row.sysctl_tw_recycle &&
1348 (dst = inet_csk_route_req(sk, req)) != NULL && 1342 (dst = inet_csk_route_req(sk, req)) != NULL &&
1349 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 1343 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1350 peer->v4daddr == saddr) { 1344 peer->daddr.a4 == saddr) {
1351 inet_peer_refcheck(peer); 1345 inet_peer_refcheck(peer);
1352 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && 1346 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1353 (s32)(peer->tcp_ts - req->ts_recent) > 1347 (s32)(peer->tcp_ts - req->ts_recent) >
@@ -1763,64 +1757,40 @@ do_time_wait:
1763 goto discard_it; 1757 goto discard_it;
1764} 1758}
1765 1759
1766/* VJ's idea. Save last timestamp seen from this destination 1760struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1767 * and hold it at least for normal timewait interval to use for duplicate
1768 * segment detection in subsequent connections, before they enter synchronized
1769 * state.
1770 */
1771
1772int tcp_v4_remember_stamp(struct sock *sk)
1773{ 1761{
1762 struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1774 struct inet_sock *inet = inet_sk(sk); 1763 struct inet_sock *inet = inet_sk(sk);
1775 struct tcp_sock *tp = tcp_sk(sk); 1764 struct inet_peer *peer;
1776 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1777 struct inet_peer *peer = NULL;
1778 int release_it = 0;
1779 1765
1780 if (!rt || rt->rt_dst != inet->inet_daddr) { 1766 if (!rt || rt->rt_dst != inet->inet_daddr) {
1781 peer = inet_getpeer(inet->inet_daddr, 1); 1767 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1782 release_it = 1; 1768 *release_it = true;
1783 } else { 1769 } else {
1784 if (!rt->peer) 1770 if (!rt->peer)
1785 rt_bind_peer(rt, 1); 1771 rt_bind_peer(rt, 1);
1786 peer = rt->peer; 1772 peer = rt->peer;
1773 *release_it = false;
1787 } 1774 }
1788 1775
1789 if (peer) { 1776 return peer;
1790 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
1791 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1792 peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
1793 peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
1794 peer->tcp_ts = tp->rx_opt.ts_recent;
1795 }
1796 if (release_it)
1797 inet_putpeer(peer);
1798 return 1;
1799 }
1800
1801 return 0;
1802} 1777}
1803EXPORT_SYMBOL(tcp_v4_remember_stamp); 1778EXPORT_SYMBOL(tcp_v4_get_peer);
1804 1779
1805int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw) 1780void *tcp_v4_tw_get_peer(struct sock *sk)
1806{ 1781{
1807 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1); 1782 struct inet_timewait_sock *tw = inet_twsk(sk);
1808
1809 if (peer) {
1810 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1811
1812 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
1813 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1814 peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
1815 peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
1816 peer->tcp_ts = tcptw->tw_ts_recent;
1817 }
1818 inet_putpeer(peer);
1819 return 1;
1820 }
1821 1783
1822 return 0; 1784 return inet_getpeer_v4(tw->tw_daddr, 1);
1823} 1785}
1786EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1787
1788static struct timewait_sock_ops tcp_timewait_sock_ops = {
1789 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1790 .twsk_unique = tcp_twsk_unique,
1791 .twsk_destructor= tcp_twsk_destructor,
1792 .twsk_getpeer = tcp_v4_tw_get_peer,
1793};
1824 1794
1825const struct inet_connection_sock_af_ops ipv4_specific = { 1795const struct inet_connection_sock_af_ops ipv4_specific = {
1826 .queue_xmit = ip_queue_xmit, 1796 .queue_xmit = ip_queue_xmit,
@@ -1828,7 +1798,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
1828 .rebuild_header = inet_sk_rebuild_header, 1798 .rebuild_header = inet_sk_rebuild_header,
1829 .conn_request = tcp_v4_conn_request, 1799 .conn_request = tcp_v4_conn_request,
1830 .syn_recv_sock = tcp_v4_syn_recv_sock, 1800 .syn_recv_sock = tcp_v4_syn_recv_sock,
1831 .remember_stamp = tcp_v4_remember_stamp, 1801 .get_peer = tcp_v4_get_peer,
1832 .net_header_len = sizeof(struct iphdr), 1802 .net_header_len = sizeof(struct iphdr),
1833 .setsockopt = ip_setsockopt, 1803 .setsockopt = ip_setsockopt,
1834 .getsockopt = ip_getsockopt, 1804 .getsockopt = ip_getsockopt,
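
tcp_v4_remember_stamp() is split in two here: the address-family specific part only resolves an inet_peer (tcp_v4_get_peer), while the timestamp bookkeeping becomes generic and moves to tcp_minisocks.c below. A sketch of how the new hook is consumed, using the signatures visible in this diff; the wrapper function is hypothetical:

#include <net/inet_connection_sock.h>
#include <net/inetpeer.h>

/* Hypothetical caller: fetch the peer through the per-family op and drop
 * the reference only if the op reported taking a fresh one. */
static void example_touch_peer(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_peer *peer;
        bool release_it;

        peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
        if (!peer)
                return;

        /* ... inspect or update peer->tcp_ts / peer->tcp_ts_stamp ... */

        if (release_it)
                inet_putpeer(peer);
}
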
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a66735f7596..80b1f80759a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -49,6 +49,56 @@ struct inet_timewait_death_row tcp_death_row = {
49}; 49};
50EXPORT_SYMBOL_GPL(tcp_death_row); 50EXPORT_SYMBOL_GPL(tcp_death_row);
51 51
52/* VJ's idea. Save last timestamp seen from this destination
53 * and hold it at least for normal timewait interval to use for duplicate
54 * segment detection in subsequent connections, before they enter synchronized
55 * state.
56 */
57
58static int tcp_remember_stamp(struct sock *sk)
59{
60 const struct inet_connection_sock *icsk = inet_csk(sk);
61 struct tcp_sock *tp = tcp_sk(sk);
62 struct inet_peer *peer;
63 bool release_it;
64
65 peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
66 if (peer) {
67 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
68 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
69 peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
70 peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
71 peer->tcp_ts = tp->rx_opt.ts_recent;
72 }
73 if (release_it)
74 inet_putpeer(peer);
75 return 1;
76 }
77
78 return 0;
79}
80
81static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
82{
83 struct sock *sk = (struct sock *) tw;
84 struct inet_peer *peer;
85
86 peer = twsk_getpeer(sk);
87 if (peer) {
88 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
89
90 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
91 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
92 peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
93 peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
94 peer->tcp_ts = tcptw->tw_ts_recent;
95 }
96 inet_putpeer(peer);
97 return 1;
98 }
99 return 0;
100}
101
52static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) 102static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
53{ 103{
54 if (seq == s_win) 104 if (seq == s_win)
@@ -149,14 +199,9 @@ kill_with_rst:
149 tcptw->tw_ts_recent = tmp_opt.rcv_tsval; 199 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
150 } 200 }
151 201
152 /* I am shamed, but failed to make it more elegant. 202 if (tcp_death_row.sysctl_tw_recycle &&
153 * Yes, it is direct reference to IP, which is impossible 203 tcptw->tw_ts_recent_stamp &&
154 * to generalize to IPv6. Taking into account that IPv6 204 tcp_tw_remember_stamp(tw))
155 * do not understand recycling in any case, it not
156 * a big problem in practice. --ANK */
157 if (tw->tw_family == AF_INET &&
158 tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
159 tcp_v4_tw_remember_stamp(tw))
160 inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout, 205 inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
161 TCP_TIMEWAIT_LEN); 206 TCP_TIMEWAIT_LEN);
162 else 207 else
@@ -274,7 +319,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
274 int recycle_ok = 0; 319 int recycle_ok = 0;
275 320
276 if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp) 321 if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
277 recycle_ok = icsk->icsk_af_ops->remember_stamp(sk); 322 recycle_ok = tcp_remember_stamp(sk);
278 323
279 if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets) 324 if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
280 tw = inet_twsk_alloc(sk, state); 325 tw = inet_twsk_alloc(sk, state);
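
tcp_tw_remember_stamp() above reaches the peer through twsk_getpeer(), dispatching to the .twsk_getpeer member this patch adds to both timewait_sock_ops tables. The dispatcher itself is not part of this excerpt; presumably it is a thin inline in include/net/timewait_sock.h along these lines (sketch only, layout assumed from this kernel generation):

/* Sketch: route a timewait sock to its family's peer lookup, if any. */
static inline void *example_twsk_getpeer(struct sock *sk)
{
        if (sk->sk_prot->twsk_prot->twsk_getpeer)
                return sk->sk_prot->twsk_prot->twsk_getpeer(sk);
        return NULL;
}
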
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 61c2463e275..97041f24cd2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -55,7 +55,7 @@ int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
55int sysctl_tcp_tso_win_divisor __read_mostly = 3; 55int sysctl_tcp_tso_win_divisor __read_mostly = 3;
56 56
57int sysctl_tcp_mtu_probing __read_mostly = 0; 57int sysctl_tcp_mtu_probing __read_mostly = 0;
58int sysctl_tcp_base_mss __read_mostly = 512; 58int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
59 59
60/* By default, RFC2861 behavior. */ 60/* By default, RFC2861 behavior. */
61int sysctl_tcp_slow_start_after_idle __read_mostly = 1; 61int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
@@ -824,8 +824,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
824 &md5); 824 &md5);
825 tcp_header_size = tcp_options_size + sizeof(struct tcphdr); 825 tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
826 826
827 if (tcp_packets_in_flight(tp) == 0) 827 if (tcp_packets_in_flight(tp) == 0) {
828 tcp_ca_event(sk, CA_EVENT_TX_START); 828 tcp_ca_event(sk, CA_EVENT_TX_START);
829 skb->ooo_okay = 1;
830 } else
831 skb->ooo_okay = 0;
829 832
830 skb_push(skb, tcp_header_size); 833 skb_push(skb, tcp_header_size);
831 skb_reset_transport_header(skb); 834 skb_reset_transport_header(skb);
@@ -2596,6 +2599,7 @@ int tcp_connect(struct sock *sk)
2596{ 2599{
2597 struct tcp_sock *tp = tcp_sk(sk); 2600 struct tcp_sock *tp = tcp_sk(sk);
2598 struct sk_buff *buff; 2601 struct sk_buff *buff;
2602 int err;
2599 2603
2600 tcp_connect_init(sk); 2604 tcp_connect_init(sk);
2601 2605
@@ -2618,7 +2622,9 @@ int tcp_connect(struct sock *sk)
2618 sk->sk_wmem_queued += buff->truesize; 2622 sk->sk_wmem_queued += buff->truesize;
2619 sk_mem_charge(sk, buff->truesize); 2623 sk_mem_charge(sk, buff->truesize);
2620 tp->packets_out += tcp_skb_pcount(buff); 2624 tp->packets_out += tcp_skb_pcount(buff);
2621 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 2625 err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2626 if (err == -ECONNREFUSED)
2627 return err;
2622 2628
2623 /* We change tp->snd_nxt after the tcp_transmit_skb() call 2629 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2624 * in order to make this packet get counted in tcpOutSegs. 2630 * in order to make this packet get counted in tcpOutSegs.
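
Two independent changes sit in the tcp_output.c hunks: skb->ooo_okay is set only when nothing is in flight, so a transmit-queue remap cannot reorder segments that are already out, and tcp_connect() now propagates -ECONNREFUSED from the SYN transmission. A condensed sketch of the first pattern, assuming the ooo_okay bit added to struct sk_buff elsewhere in this tree; the wrapper is hypothetical:

#include <net/tcp.h>

/* Hypothetical: mark a segment as safe for out-of-order queue selection
 * only at the start of a transmission burst. */
static void example_mark_tx_start(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tcp_packets_in_flight(tp) == 0) {
                tcp_ca_event(sk, CA_EVENT_TX_START);
                skb->ooo_okay = 1;      /* nothing in flight, remap freely */
        } else {
                skb->ooo_okay = 0;
        }
}
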
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 6211e211417..85ee7eb7e38 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -154,7 +154,7 @@ static int tcpprobe_sprint(char *tbuf, int n)
154 struct timespec tv 154 struct timespec tv
155 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); 155 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
156 156
157 return snprintf(tbuf, n, 157 return scnprintf(tbuf, n,
158 "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n", 158 "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n",
159 (unsigned long) tv.tv_sec, 159 (unsigned long) tv.tv_sec,
160 (unsigned long) tv.tv_nsec, 160 (unsigned long) tv.tv_nsec,
@@ -174,7 +174,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
174 return -EINVAL; 174 return -EINVAL;
175 175
176 while (cnt < len) { 176 while (cnt < len) {
177 char tbuf[128]; 177 char tbuf[164];
178 int width; 178 int width;
179 179
180 /* Wait for data in buffer */ 180 /* Wait for data in buffer */
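
tcp_probe switches from snprintf() to scnprintf() because their return values differ on truncation: snprintf() reports the length that would have been needed, while scnprintf() reports the number of characters actually stored, which is what the copy-to-user loop in this file consumes. Minimal sketch with made-up fields:

#include <linux/kernel.h>

/* Hypothetical formatter: the return value is always a valid byte count
 * within tbuf, even when the line was truncated to fit. */
static int example_format_line(char *tbuf, int n,
                               unsigned long sec, unsigned long nsec)
{
        return scnprintf(tbuf, n, "%lu.%09lu\n", sec, nsec);
}
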
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5e0a3a582a5..b37181da487 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -430,7 +430,7 @@ begin:
430 430
431 if (result) { 431 if (result) {
432exact_match: 432exact_match:
433 if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) 433 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
434 result = NULL; 434 result = NULL;
435 else if (unlikely(compute_score2(result, net, saddr, sport, 435 else if (unlikely(compute_score2(result, net, saddr, sport,
436 daddr, hnum, dif) < badness)) { 436 daddr, hnum, dif) < badness)) {
@@ -500,7 +500,7 @@ begin:
500 goto begin; 500 goto begin;
501 501
502 if (result) { 502 if (result) {
503 if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) 503 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
504 result = NULL; 504 result = NULL;
505 else if (unlikely(compute_score(result, net, saddr, hnum, sport, 505 else if (unlikely(compute_score(result, net, saddr, hnum, sport,
506 daddr, dport, dif) < badness)) { 506 daddr, dport, dif) < badness)) {
@@ -890,15 +890,13 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
890 if (rt == NULL) { 890 if (rt == NULL) {
891 struct flowi fl = { .oif = ipc.oif, 891 struct flowi fl = { .oif = ipc.oif,
892 .mark = sk->sk_mark, 892 .mark = sk->sk_mark,
893 .nl_u = { .ip4_u = 893 .fl4_dst = faddr,
894 { .daddr = faddr, 894 .fl4_src = saddr,
895 .saddr = saddr, 895 .fl4_tos = tos,
896 .tos = tos } },
897 .proto = sk->sk_protocol, 896 .proto = sk->sk_protocol,
898 .flags = inet_sk_flowi_flags(sk), 897 .flags = inet_sk_flowi_flags(sk),
899 .uli_u = { .ports = 898 .fl_ip_sport = inet->inet_sport,
900 { .sport = inet->inet_sport, 899 .fl_ip_dport = dport };
901 .dport = dport } } };
902 struct net *net = sock_net(sk); 900 struct net *net = sock_net(sk);
903 901
904 security_sk_classify_flow(sk, &fl); 902 security_sk_classify_flow(sk, &fl);
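
The UDP lookup paths move to atomic_inc_not_zero_hint(), passing 2 as the value sk_refcnt is most likely to hold for an established, hashed socket, so the underlying cmpxchg usually succeeds on its first attempt. Sketch of the idiom; the helper is assumed to come from <linux/atomic.h> in this tree, and the wrapper is hypothetical:

#include <linux/atomic.h>       /* header location assumed for this tree */
#include <net/sock.h>

/* Hypothetical refcount grab during an RCU lookup: fail cleanly if the
 * socket is already on its way to being freed. */
static struct sock *example_hold_sock(struct sock *sk)
{
        if (unlikely(!atomic_inc_not_zero_hint(&sk->sk_refcnt, 2)))
                return NULL;    /* lost the race with the final put */
        return sk;
}
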
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 4464f3bff6a..b057d40adde 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -11,6 +11,7 @@
11#include <linux/err.h> 11#include <linux/err.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/inetdevice.h> 13#include <linux/inetdevice.h>
14#include <linux/if_tunnel.h>
14#include <net/dst.h> 15#include <net/dst.h>
15#include <net/xfrm.h> 16#include <net/xfrm.h>
16#include <net/ip.h> 17#include <net/ip.h>
@@ -22,12 +23,8 @@ static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
22 xfrm_address_t *daddr) 23 xfrm_address_t *daddr)
23{ 24{
24 struct flowi fl = { 25 struct flowi fl = {
25 .nl_u = { 26 .fl4_dst = daddr->a4,
26 .ip4_u = { 27 .fl4_tos = tos,
27 .tos = tos,
28 .daddr = daddr->a4,
29 },
30 },
31 }; 28 };
32 struct dst_entry *dst; 29 struct dst_entry *dst;
33 struct rtable *rt; 30 struct rtable *rt;
@@ -80,10 +77,6 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
80 xdst->u.dst.dev = dev; 77 xdst->u.dst.dev = dev;
81 dev_hold(dev); 78 dev_hold(dev);
82 79
83 xdst->u.rt.idev = in_dev_get(dev);
84 if (!xdst->u.rt.idev)
85 return -ENODEV;
86
87 xdst->u.rt.peer = rt->peer; 80 xdst->u.rt.peer = rt->peer;
88 if (rt->peer) 81 if (rt->peer)
89 atomic_inc(&rt->peer->refcnt); 82 atomic_inc(&rt->peer->refcnt);
@@ -158,6 +151,20 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
158 fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1])); 151 fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
159 } 152 }
160 break; 153 break;
154
155 case IPPROTO_GRE:
156 if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
157 __be16 *greflags = (__be16 *)xprth;
158 __be32 *gre_hdr = (__be32 *)xprth;
159
160 if (greflags[0] & GRE_KEY) {
161 if (greflags[0] & GRE_CSUM)
162 gre_hdr++;
163 fl->fl_gre_key = gre_hdr[1];
164 }
165 }
166 break;
167
161 default: 168 default:
162 fl->fl_ipsec_spi = 0; 169 fl->fl_ipsec_spi = 0;
163 break; 170 break;
@@ -189,8 +196,6 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
189{ 196{
190 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 197 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
191 198
192 if (likely(xdst->u.rt.idev))
193 in_dev_put(xdst->u.rt.idev);
194 if (likely(xdst->u.rt.peer)) 199 if (likely(xdst->u.rt.peer))
195 inet_putpeer(xdst->u.rt.peer); 200 inet_putpeer(xdst->u.rt.peer);
196 xfrm_dst_destroy(xdst); 201 xfrm_dst_destroy(xdst);
@@ -199,27 +204,9 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
199static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, 204static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
200 int unregister) 205 int unregister)
201{ 206{
202 struct xfrm_dst *xdst;
203
204 if (!unregister) 207 if (!unregister)
205 return; 208 return;
206 209
207 xdst = (struct xfrm_dst *)dst;
208 if (xdst->u.rt.idev->dev == dev) {
209 struct in_device *loopback_idev =
210 in_dev_get(dev_net(dev)->loopback_dev);
211 BUG_ON(!loopback_idev);
212
213 do {
214 in_dev_put(xdst->u.rt.idev);
215 xdst->u.rt.idev = loopback_idev;
216 in_dev_hold(loopback_idev);
217 xdst = (struct xfrm_dst *)xdst->u.dst.child;
218 } while (xdst->u.dst.xfrm);
219
220 __in_dev_put(loopback_idev);
221 }
222
223 xfrm_dst_ifdown(dst, dev); 210 xfrm_dst_ifdown(dst, dev);
224} 211}
225 212
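
The new IPPROTO_GRE case lets IPsec policy lookups key on fl_gre_key: after the 16-bit flags word, the optional key occupies the 32-bit word following the base header, shifted one word further when GRE_CSUM indicates a checksum field. The same offset arithmetic in isolation, assuming GRE_KEY/GRE_CSUM from <linux/if_tunnel.h> (the include this patch adds); the function is hypothetical:

#include <linux/if_tunnel.h>
#include <linux/types.h>

/* Hypothetical parser: return the GRE key, or 0 if none is present.
 * xprth points at the first byte of the GRE header. */
static __be32 example_gre_key(const u8 *xprth)
{
        const __be16 *greflags = (const __be16 *)xprth;
        const __be32 *gre_hdr  = (const __be32 *)xprth;

        if (!(greflags[0] & GRE_KEY))
                return 0;
        if (greflags[0] & GRE_CSUM)     /* checksum + reserved word first */
                gre_hdr++;
        return gre_hdr[1];              /* word after flags/protocol (and csum) */
}
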
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 23cc8e1ce8d..1023ad0d2b1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3836,6 +3836,15 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3836 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao; 3836 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
3837} 3837}
3838 3838
3839static inline size_t inet6_ifla6_size(void)
3840{
3841 return nla_total_size(4) /* IFLA_INET6_FLAGS */
3842 + nla_total_size(sizeof(struct ifla_cacheinfo))
3843 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
3844 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
3845 + nla_total_size(ICMP6_MIB_MAX * 8); /* IFLA_INET6_ICMP6STATS */
3846}
3847
3839static inline size_t inet6_if_nlmsg_size(void) 3848static inline size_t inet6_if_nlmsg_size(void)
3840{ 3849{
3841 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 3850 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
@@ -3843,13 +3852,7 @@ static inline size_t inet6_if_nlmsg_size(void)
3843 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 3852 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
3844 + nla_total_size(4) /* IFLA_MTU */ 3853 + nla_total_size(4) /* IFLA_MTU */
3845 + nla_total_size(4) /* IFLA_LINK */ 3854 + nla_total_size(4) /* IFLA_LINK */
3846 + nla_total_size( /* IFLA_PROTINFO */ 3855 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
3847 nla_total_size(4) /* IFLA_INET6_FLAGS */
3848 + nla_total_size(sizeof(struct ifla_cacheinfo))
3849 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
3850 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
3851 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
3852 );
3853} 3856}
3854 3857
3855static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib, 3858static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
@@ -3896,15 +3899,70 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
3896 } 3899 }
3897} 3900}
3898 3901
3902static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
3903{
3904 struct nlattr *nla;
3905 struct ifla_cacheinfo ci;
3906
3907 NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
3908
3909 ci.max_reasm_len = IPV6_MAXPLEN;
3910 ci.tstamp = cstamp_delta(idev->tstamp);
3911 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
3912 ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
3913 NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
3914
3915 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
3916 if (nla == NULL)
3917 goto nla_put_failure;
3918 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
3919
3920 /* XXX - MC not implemented */
3921
3922 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
3923 if (nla == NULL)
3924 goto nla_put_failure;
3925 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
3926
3927 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
3928 if (nla == NULL)
3929 goto nla_put_failure;
3930 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
3931
3932 return 0;
3933
3934nla_put_failure:
3935 return -EMSGSIZE;
3936}
3937
3938static size_t inet6_get_link_af_size(const struct net_device *dev)
3939{
3940 if (!__in6_dev_get(dev))
3941 return 0;
3942
3943 return inet6_ifla6_size();
3944}
3945
3946static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
3947{
3948 struct inet6_dev *idev = __in6_dev_get(dev);
3949
3950 if (!idev)
3951 return -ENODATA;
3952
3953 if (inet6_fill_ifla6_attrs(skb, idev) < 0)
3954 return -EMSGSIZE;
3955
3956 return 0;
3957}
3958
3899static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, 3959static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
3900 u32 pid, u32 seq, int event, unsigned int flags) 3960 u32 pid, u32 seq, int event, unsigned int flags)
3901{ 3961{
3902 struct net_device *dev = idev->dev; 3962 struct net_device *dev = idev->dev;
3903 struct nlattr *nla;
3904 struct ifinfomsg *hdr; 3963 struct ifinfomsg *hdr;
3905 struct nlmsghdr *nlh; 3964 struct nlmsghdr *nlh;
3906 void *protoinfo; 3965 void *protoinfo;
3907 struct ifla_cacheinfo ci;
3908 3966
3909 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); 3967 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
3910 if (nlh == NULL) 3968 if (nlh == NULL)
@@ -3931,30 +3989,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
3931 if (protoinfo == NULL) 3989 if (protoinfo == NULL)
3932 goto nla_put_failure; 3990 goto nla_put_failure;
3933 3991
3934 NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags); 3992 if (inet6_fill_ifla6_attrs(skb, idev) < 0)
3935
3936 ci.max_reasm_len = IPV6_MAXPLEN;
3937 ci.tstamp = cstamp_delta(idev->tstamp);
3938 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
3939 ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
3940 NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
3941
3942 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
3943 if (nla == NULL)
3944 goto nla_put_failure;
3945 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
3946
3947 /* XXX - MC not implemented */
3948
3949 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
3950 if (nla == NULL)
3951 goto nla_put_failure;
3952 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
3953
3954 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
3955 if (nla == NULL)
3956 goto nla_put_failure; 3993 goto nla_put_failure;
3957 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
3958 3994
3959 nla_nest_end(skb, protoinfo); 3995 nla_nest_end(skb, protoinfo);
3960 return nlmsg_end(skb, nlh); 3996 return nlmsg_end(skb, nlh);
@@ -4625,6 +4661,12 @@ int unregister_inet6addr_notifier(struct notifier_block *nb)
4625} 4661}
4626EXPORT_SYMBOL(unregister_inet6addr_notifier); 4662EXPORT_SYMBOL(unregister_inet6addr_notifier);
4627 4663
4664static struct rtnl_af_ops inet6_ops = {
4665 .family = AF_INET6,
4666 .fill_link_af = inet6_fill_link_af,
4667 .get_link_af_size = inet6_get_link_af_size,
4668};
4669
4628/* 4670/*
4629 * Init / cleanup code 4671 * Init / cleanup code
4630 */ 4672 */
@@ -4676,6 +4718,10 @@ int __init addrconf_init(void)
4676 4718
4677 addrconf_verify(0); 4719 addrconf_verify(0);
4678 4720
4721 err = rtnl_af_register(&inet6_ops);
4722 if (err < 0)
4723 goto errout_af;
4724
4679 err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo); 4725 err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo);
4680 if (err < 0) 4726 if (err < 0)
4681 goto errout; 4727 goto errout;
@@ -4691,6 +4737,8 @@ int __init addrconf_init(void)
4691 4737
4692 return 0; 4738 return 0;
4693errout: 4739errout:
4740 rtnl_af_unregister(&inet6_ops);
4741errout_af:
4694 unregister_netdevice_notifier(&ipv6_dev_notf); 4742 unregister_netdevice_notifier(&ipv6_dev_notf);
4695errlo: 4743errlo:
4696 unregister_pernet_subsys(&addrconf_ops); 4744 unregister_pernet_subsys(&addrconf_ops);
@@ -4711,6 +4759,8 @@ void addrconf_cleanup(void)
4711 4759
4712 rtnl_lock(); 4760 rtnl_lock();
4713 4761
4762 __rtnl_af_unregister(&inet6_ops);
4763
4714 /* clean dev list */ 4764 /* clean dev list */
4715 for_each_netdev(&init_net, dev) { 4765 for_each_netdev(&init_net, dev) {
4716 if (__in6_dev_get(dev) == NULL) 4766 if (__in6_dev_get(dev) == NULL)
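
addrconf now exposes its per-link state through the generic rtnl_af_ops mechanism as well as its own RTM_GETLINK handler, registering at init and unregistering on the error path and at cleanup. A minimal registration sketch for a made-up family, filling the same three fields the patch uses; every name here is hypothetical except the rtnl_af_ops members themselves:

#include <linux/socket.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>

/* Hypothetical AF_SPEC provider: one u32 attribute per link. */
static size_t example_get_link_af_size(const struct net_device *dev)
{
        return nla_total_size(4);
}

static int example_fill_link_af(struct sk_buff *skb,
                                const struct net_device *dev)
{
        return nla_put_u32(skb, 1 /* made-up attr type */, 0 /* value */);
}

static struct rtnl_af_ops example_af_ops = {
        .family           = AF_INET,    /* placeholder; the patch registers AF_INET6 */
        .fill_link_af     = example_fill_link_af,
        .get_link_af_size = example_get_link_af_size,
};

/* Registration mirrors the hunk above:
 *      err = rtnl_af_register(&example_af_ops);
 *      ...
 *      rtnl_af_unregister(&example_af_ops);
 */
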
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 8a1628023bd..e46305d1815 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -54,24 +54,54 @@ int inet6_csk_bind_conflict(const struct sock *sk,
54 54
55EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); 55EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
56 56
57struct dst_entry *inet6_csk_route_req(struct sock *sk,
58 const struct request_sock *req)
59{
60 struct inet6_request_sock *treq = inet6_rsk(req);
61 struct ipv6_pinfo *np = inet6_sk(sk);
62 struct in6_addr *final_p, final;
63 struct dst_entry *dst;
64 struct flowi fl;
65
66 memset(&fl, 0, sizeof(fl));
67 fl.proto = IPPROTO_TCP;
68 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
69 final_p = fl6_update_dst(&fl, np->opt, &final);
70 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
71 fl.oif = sk->sk_bound_dev_if;
72 fl.mark = sk->sk_mark;
73 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
74 fl.fl_ip_sport = inet_rsk(req)->loc_port;
75 security_req_classify_flow(req, &fl);
76
77 if (ip6_dst_lookup(sk, &dst, &fl))
78 return NULL;
79
80 if (final_p)
81 ipv6_addr_copy(&fl.fl6_dst, final_p);
82
83 if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
84 return NULL;
85
86 return dst;
87}
88
57/* 89/*
58 * request_sock (formerly open request) hash tables. 90 * request_sock (formerly open request) hash tables.
59 */ 91 */
60static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport, 92static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
61 const u32 rnd, const u16 synq_hsize) 93 const u32 rnd, const u16 synq_hsize)
62{ 94{
63 u32 a = (__force u32)raddr->s6_addr32[0]; 95 u32 c;
64 u32 b = (__force u32)raddr->s6_addr32[1]; 96
65 u32 c = (__force u32)raddr->s6_addr32[2]; 97 c = jhash_3words((__force u32)raddr->s6_addr32[0],
66 98 (__force u32)raddr->s6_addr32[1],
67 a += JHASH_GOLDEN_RATIO; 99 (__force u32)raddr->s6_addr32[2],
68 b += JHASH_GOLDEN_RATIO; 100 rnd);
69 c += rnd; 101
70 __jhash_mix(a, b, c); 102 c = jhash_2words((__force u32)raddr->s6_addr32[3],
71 103 (__force u32)rport,
72 a += (__force u32)raddr->s6_addr32[3]; 104 c);
73 b += (__force u32)rport;
74 __jhash_mix(a, b, c);
75 105
76 return c & (synq_hsize - 1); 106 return c & (synq_hsize - 1);
77} 107}
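
Open-coded JHASH_GOLDEN_RATIO/__jhash_mix() sequences are replaced here (and in reassembly.c further down) by the jhash_3words()/jhash_2words() helpers, which hide the mixing constants and final avalanche. The chaining pattern restated as a standalone sketch; only the jhash helpers are real, the rest is hypothetical:

#include <linux/jhash.h>
#include <linux/in6.h>

/* Hypothetical bucket hash for an IPv6 address plus port, seeded by a
 * per-table random value; table size must be a power of two. */
static u32 example_synq_hash(const struct in6_addr *raddr, __be16 rport,
                             u32 rnd, u16 hsize)
{
        u32 c;

        c = jhash_3words((__force u32)raddr->s6_addr32[0],
                         (__force u32)raddr->s6_addr32[1],
                         (__force u32)raddr->s6_addr32[2], rnd);
        c = jhash_2words((__force u32)raddr->s6_addr32[3],
                         (__force u32)rport, c);

        return c & (hsize - 1);
}
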
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 70e891a20fb..4f4483e697b 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -58,8 +58,6 @@ MODULE_AUTHOR("Ville Nuorvala");
58MODULE_DESCRIPTION("IPv6 tunneling device"); 58MODULE_DESCRIPTION("IPv6 tunneling device");
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60 60
61#define IPV6_TLV_TEL_DST_SIZE 8
62
63#ifdef IP6_TNL_DEBUG 61#ifdef IP6_TNL_DEBUG
64#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) 62#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
65#else 63#else
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 6f32ffce702..9fab274019c 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1843,9 +1843,7 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1843 1843
1844 fl = (struct flowi) { 1844 fl = (struct flowi) {
1845 .oif = vif->link, 1845 .oif = vif->link,
1846 .nl_u = { .ip6_u = 1846 .fl6_dst = ipv6h->daddr,
1847 { .daddr = ipv6h->daddr, }
1848 }
1849 }; 1847 };
1850 1848
1851 dst = ip6_route_output(net, NULL, &fl); 1849 dst = ip6_route_output(net, NULL, &fl);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index d1444b95ad7..49f986d626a 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -82,7 +82,7 @@ static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
82static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; 82static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
83 83
84/* Big mc list lock for all the sockets */ 84/* Big mc list lock for all the sockets */
85static DEFINE_RWLOCK(ipv6_sk_mc_lock); 85static DEFINE_SPINLOCK(ipv6_sk_mc_lock);
86 86
87static void igmp6_join_group(struct ifmcaddr6 *ma); 87static void igmp6_join_group(struct ifmcaddr6 *ma);
88static void igmp6_leave_group(struct ifmcaddr6 *ma); 88static void igmp6_leave_group(struct ifmcaddr6 *ma);
@@ -123,6 +123,11 @@ int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
123 * socket join on multicast group 123 * socket join on multicast group
124 */ 124 */
125 125
126#define for_each_pmc_rcu(np, pmc) \
127 for (pmc = rcu_dereference(np->ipv6_mc_list); \
128 pmc != NULL; \
129 pmc = rcu_dereference(pmc->next))
130
126int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) 131int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
127{ 132{
128 struct net_device *dev = NULL; 133 struct net_device *dev = NULL;
@@ -134,15 +139,15 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
134 if (!ipv6_addr_is_multicast(addr)) 139 if (!ipv6_addr_is_multicast(addr))
135 return -EINVAL; 140 return -EINVAL;
136 141
137 read_lock_bh(&ipv6_sk_mc_lock); 142 rcu_read_lock();
138 for (mc_lst=np->ipv6_mc_list; mc_lst; mc_lst=mc_lst->next) { 143 for_each_pmc_rcu(np, mc_lst) {
139 if ((ifindex == 0 || mc_lst->ifindex == ifindex) && 144 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
140 ipv6_addr_equal(&mc_lst->addr, addr)) { 145 ipv6_addr_equal(&mc_lst->addr, addr)) {
141 read_unlock_bh(&ipv6_sk_mc_lock); 146 rcu_read_unlock();
142 return -EADDRINUSE; 147 return -EADDRINUSE;
143 } 148 }
144 } 149 }
145 read_unlock_bh(&ipv6_sk_mc_lock); 150 rcu_read_unlock();
146 151
147 mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL); 152 mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
148 153
@@ -186,33 +191,41 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
186 return err; 191 return err;
187 } 192 }
188 193
189 write_lock_bh(&ipv6_sk_mc_lock); 194 spin_lock(&ipv6_sk_mc_lock);
190 mc_lst->next = np->ipv6_mc_list; 195 mc_lst->next = np->ipv6_mc_list;
191 np->ipv6_mc_list = mc_lst; 196 rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
192 write_unlock_bh(&ipv6_sk_mc_lock); 197 spin_unlock(&ipv6_sk_mc_lock);
193 198
194 rcu_read_unlock(); 199 rcu_read_unlock();
195 200
196 return 0; 201 return 0;
197} 202}
198 203
204static void ipv6_mc_socklist_reclaim(struct rcu_head *head)
205{
206 kfree(container_of(head, struct ipv6_mc_socklist, rcu));
207}
199/* 208/*
200 * socket leave on multicast group 209 * socket leave on multicast group
201 */ 210 */
202int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) 211int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
203{ 212{
204 struct ipv6_pinfo *np = inet6_sk(sk); 213 struct ipv6_pinfo *np = inet6_sk(sk);
205 struct ipv6_mc_socklist *mc_lst, **lnk; 214 struct ipv6_mc_socklist *mc_lst;
215 struct ipv6_mc_socklist __rcu **lnk;
206 struct net *net = sock_net(sk); 216 struct net *net = sock_net(sk);
207 217
208 write_lock_bh(&ipv6_sk_mc_lock); 218 spin_lock(&ipv6_sk_mc_lock);
209 for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) !=NULL ; lnk = &mc_lst->next) { 219 for (lnk = &np->ipv6_mc_list;
220 (mc_lst = rcu_dereference_protected(*lnk,
221 lockdep_is_held(&ipv6_sk_mc_lock))) !=NULL ;
222 lnk = &mc_lst->next) {
210 if ((ifindex == 0 || mc_lst->ifindex == ifindex) && 223 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
211 ipv6_addr_equal(&mc_lst->addr, addr)) { 224 ipv6_addr_equal(&mc_lst->addr, addr)) {
212 struct net_device *dev; 225 struct net_device *dev;
213 226
214 *lnk = mc_lst->next; 227 *lnk = mc_lst->next;
215 write_unlock_bh(&ipv6_sk_mc_lock); 228 spin_unlock(&ipv6_sk_mc_lock);
216 229
217 rcu_read_lock(); 230 rcu_read_lock();
218 dev = dev_get_by_index_rcu(net, mc_lst->ifindex); 231 dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
@@ -225,11 +238,12 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
225 } else 238 } else
226 (void) ip6_mc_leave_src(sk, mc_lst, NULL); 239 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
227 rcu_read_unlock(); 240 rcu_read_unlock();
228 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); 241 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
242 call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim);
229 return 0; 243 return 0;
230 } 244 }
231 } 245 }
232 write_unlock_bh(&ipv6_sk_mc_lock); 246 spin_unlock(&ipv6_sk_mc_lock);
233 247
234 return -EADDRNOTAVAIL; 248 return -EADDRNOTAVAIL;
235} 249}
@@ -257,7 +271,7 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
257 return NULL; 271 return NULL;
258 idev = __in6_dev_get(dev); 272 idev = __in6_dev_get(dev);
259 if (!idev) 273 if (!idev)
260 return NULL;; 274 return NULL;
261 read_lock_bh(&idev->lock); 275 read_lock_bh(&idev->lock);
262 if (idev->dead) { 276 if (idev->dead) {
263 read_unlock_bh(&idev->lock); 277 read_unlock_bh(&idev->lock);
@@ -272,12 +286,13 @@ void ipv6_sock_mc_close(struct sock *sk)
272 struct ipv6_mc_socklist *mc_lst; 286 struct ipv6_mc_socklist *mc_lst;
273 struct net *net = sock_net(sk); 287 struct net *net = sock_net(sk);
274 288
275 write_lock_bh(&ipv6_sk_mc_lock); 289 spin_lock(&ipv6_sk_mc_lock);
276 while ((mc_lst = np->ipv6_mc_list) != NULL) { 290 while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
291 lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
277 struct net_device *dev; 292 struct net_device *dev;
278 293
279 np->ipv6_mc_list = mc_lst->next; 294 np->ipv6_mc_list = mc_lst->next;
280 write_unlock_bh(&ipv6_sk_mc_lock); 295 spin_unlock(&ipv6_sk_mc_lock);
281 296
282 rcu_read_lock(); 297 rcu_read_lock();
283 dev = dev_get_by_index_rcu(net, mc_lst->ifindex); 298 dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
@@ -290,11 +305,13 @@ void ipv6_sock_mc_close(struct sock *sk)
290 } else 305 } else
291 (void) ip6_mc_leave_src(sk, mc_lst, NULL); 306 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
292 rcu_read_unlock(); 307 rcu_read_unlock();
293 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
294 308
295 write_lock_bh(&ipv6_sk_mc_lock); 309 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
310 call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim);
311
312 spin_lock(&ipv6_sk_mc_lock);
296 } 313 }
297 write_unlock_bh(&ipv6_sk_mc_lock); 314 spin_unlock(&ipv6_sk_mc_lock);
298} 315}
299 316
300int ip6_mc_source(int add, int omode, struct sock *sk, 317int ip6_mc_source(int add, int omode, struct sock *sk,
@@ -328,8 +345,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
328 345
329 err = -EADDRNOTAVAIL; 346 err = -EADDRNOTAVAIL;
330 347
331 read_lock(&ipv6_sk_mc_lock); 348 for_each_pmc_rcu(inet6, pmc) {
332 for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
333 if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface) 349 if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
334 continue; 350 continue;
335 if (ipv6_addr_equal(&pmc->addr, group)) 351 if (ipv6_addr_equal(&pmc->addr, group))
@@ -428,7 +444,6 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
428done: 444done:
429 if (pmclocked) 445 if (pmclocked)
430 write_unlock(&pmc->sflock); 446 write_unlock(&pmc->sflock);
431 read_unlock(&ipv6_sk_mc_lock);
432 read_unlock_bh(&idev->lock); 447 read_unlock_bh(&idev->lock);
433 rcu_read_unlock(); 448 rcu_read_unlock();
434 if (leavegroup) 449 if (leavegroup)
@@ -466,14 +481,13 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
466 dev = idev->dev; 481 dev = idev->dev;
467 482
468 err = 0; 483 err = 0;
469 read_lock(&ipv6_sk_mc_lock);
470 484
471 if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) { 485 if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
472 leavegroup = 1; 486 leavegroup = 1;
473 goto done; 487 goto done;
474 } 488 }
475 489
476 for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) { 490 for_each_pmc_rcu(inet6, pmc) {
477 if (pmc->ifindex != gsf->gf_interface) 491 if (pmc->ifindex != gsf->gf_interface)
478 continue; 492 continue;
479 if (ipv6_addr_equal(&pmc->addr, group)) 493 if (ipv6_addr_equal(&pmc->addr, group))
@@ -521,7 +535,6 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
521 write_unlock(&pmc->sflock); 535 write_unlock(&pmc->sflock);
522 err = 0; 536 err = 0;
523done: 537done:
524 read_unlock(&ipv6_sk_mc_lock);
525 read_unlock_bh(&idev->lock); 538 read_unlock_bh(&idev->lock);
526 rcu_read_unlock(); 539 rcu_read_unlock();
527 if (leavegroup) 540 if (leavegroup)
@@ -562,7 +575,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
562 * so reading the list is safe. 575 * so reading the list is safe.
563 */ 576 */
564 577
565 for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) { 578 for_each_pmc_rcu(inet6, pmc) {
566 if (pmc->ifindex != gsf->gf_interface) 579 if (pmc->ifindex != gsf->gf_interface)
567 continue; 580 continue;
568 if (ipv6_addr_equal(group, &pmc->addr)) 581 if (ipv6_addr_equal(group, &pmc->addr))
@@ -612,13 +625,13 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
612 struct ip6_sf_socklist *psl; 625 struct ip6_sf_socklist *psl;
613 int rv = 1; 626 int rv = 1;
614 627
615 read_lock(&ipv6_sk_mc_lock); 628 rcu_read_lock();
616 for (mc = np->ipv6_mc_list; mc; mc = mc->next) { 629 for_each_pmc_rcu(np, mc) {
617 if (ipv6_addr_equal(&mc->addr, mc_addr)) 630 if (ipv6_addr_equal(&mc->addr, mc_addr))
618 break; 631 break;
619 } 632 }
620 if (!mc) { 633 if (!mc) {
621 read_unlock(&ipv6_sk_mc_lock); 634 rcu_read_unlock();
622 return 1; 635 return 1;
623 } 636 }
624 read_lock(&mc->sflock); 637 read_lock(&mc->sflock);
@@ -638,7 +651,7 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
638 rv = 0; 651 rv = 0;
639 } 652 }
640 read_unlock(&mc->sflock); 653 read_unlock(&mc->sflock);
641 read_unlock(&ipv6_sk_mc_lock); 654 rcu_read_unlock();
642 655
643 return rv; 656 return rv;
644} 657}
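
The mcast.c conversion swaps the per-socket rwlock for RCU on the read side and a spinlock for writers: readers walk the list under rcu_read_lock() via the new for_each_pmc_rcu() helper, writers publish entries with rcu_assign_pointer() and free them through call_rcu(). A condensed read-side sketch reusing the names from the hunk; the wrapper function is hypothetical:

#include <linux/rcupdate.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>

/* Hypothetical membership test: lock-free for readers, relying on the
 * writers above to retire entries through call_rcu(). */
static bool example_is_member(struct ipv6_pinfo *np,
                              const struct in6_addr *group)
{
        struct ipv6_mc_socklist *pmc;
        bool found = false;

        rcu_read_lock();
        for_each_pmc_rcu(np, pmc) {
                if (ipv6_addr_equal(&pmc->addr, group)) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}
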
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 998d6d27e7c..e18f8413020 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -141,18 +141,18 @@ struct neigh_table nd_tbl = {
141 .proxy_redo = pndisc_redo, 141 .proxy_redo = pndisc_redo,
142 .id = "ndisc_cache", 142 .id = "ndisc_cache",
143 .parms = { 143 .parms = {
144 .tbl = &nd_tbl, 144 .tbl = &nd_tbl,
145 .base_reachable_time = 30 * HZ, 145 .base_reachable_time = ND_REACHABLE_TIME,
146 .retrans_time = 1 * HZ, 146 .retrans_time = ND_RETRANS_TIMER,
147 .gc_staletime = 60 * HZ, 147 .gc_staletime = 60 * HZ,
148 .reachable_time = 30 * HZ, 148 .reachable_time = ND_REACHABLE_TIME,
149 .delay_probe_time = 5 * HZ, 149 .delay_probe_time = 5 * HZ,
150 .queue_len = 3, 150 .queue_len = 3,
151 .ucast_probes = 3, 151 .ucast_probes = 3,
152 .mcast_probes = 3, 152 .mcast_probes = 3,
153 .anycast_delay = 1 * HZ, 153 .anycast_delay = 1 * HZ,
154 .proxy_delay = (8 * HZ) / 10, 154 .proxy_delay = (8 * HZ) / 10,
155 .proxy_qlen = 64, 155 .proxy_qlen = 64,
156 }, 156 },
157 .gc_interval = 30 * HZ, 157 .gc_interval = 30 * HZ,
158 .gc_thresh1 = 128, 158 .gc_thresh1 = 128,
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 7155b2451d7..35915e8617f 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -18,10 +18,8 @@ int ip6_route_me_harder(struct sk_buff *skb)
18 struct flowi fl = { 18 struct flowi fl = {
19 .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, 19 .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
20 .mark = skb->mark, 20 .mark = skb->mark,
21 .nl_u = 21 .fl6_dst = iph->daddr,
22 { .ip6_u = 22 .fl6_src = iph->saddr,
23 { .daddr = iph->daddr,
24 .saddr = iph->saddr, } },
25 }; 23 };
26 24
27 dst = ip6_route_output(net, skb->sk, &fl); 25 dst = ip6_route_output(net, skb->sk, &fl);
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 0a432c9b079..abfee91ce81 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -11,13 +11,13 @@ obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
11obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o 11obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
12 12
13# objects for l3 independent conntrack 13# objects for l3 independent conntrack
14nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o 14nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
15 15
16# l3 independent conntrack 16# l3 independent conntrack
17obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o 17obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
18 18
19# defrag 19# defrag
20nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o 20nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
21obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o 21obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
22 22
23# matches 23# matches
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 0f276645375..07beeb06f75 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -104,26 +104,22 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
104unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr, 104unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
105 const struct in6_addr *daddr, u32 rnd) 105 const struct in6_addr *daddr, u32 rnd)
106{ 106{
107 u32 a, b, c; 107 u32 c;
108 108
109 a = (__force u32)saddr->s6_addr32[0]; 109 c = jhash_3words((__force u32)saddr->s6_addr32[0],
110 b = (__force u32)saddr->s6_addr32[1]; 110 (__force u32)saddr->s6_addr32[1],
111 c = (__force u32)saddr->s6_addr32[2]; 111 (__force u32)saddr->s6_addr32[2],
112 112 rnd);
113 a += JHASH_GOLDEN_RATIO; 113
114 b += JHASH_GOLDEN_RATIO; 114 c = jhash_3words((__force u32)saddr->s6_addr32[3],
115 c += rnd; 115 (__force u32)daddr->s6_addr32[0],
116 __jhash_mix(a, b, c); 116 (__force u32)daddr->s6_addr32[1],
117 117 c);
118 a += (__force u32)saddr->s6_addr32[3]; 118
119 b += (__force u32)daddr->s6_addr32[0]; 119 c = jhash_3words((__force u32)daddr->s6_addr32[2],
120 c += (__force u32)daddr->s6_addr32[1]; 120 (__force u32)daddr->s6_addr32[3],
121 __jhash_mix(a, b, c); 121 (__force u32)id,
122 122 c);
123 a += (__force u32)daddr->s6_addr32[2];
124 b += (__force u32)daddr->s6_addr32[3];
125 c += (__force u32)id;
126 __jhash_mix(a, b, c);
127 123
128 return c & (INETFRAGS_HASHSZ - 1); 124 return c & (INETFRAGS_HASHSZ - 1);
129} 125}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 96455ffb76f..026caef0326 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -188,11 +188,29 @@ static void ip6_dst_destroy(struct dst_entry *dst)
188{ 188{
189 struct rt6_info *rt = (struct rt6_info *)dst; 189 struct rt6_info *rt = (struct rt6_info *)dst;
190 struct inet6_dev *idev = rt->rt6i_idev; 190 struct inet6_dev *idev = rt->rt6i_idev;
191 struct inet_peer *peer = rt->rt6i_peer;
191 192
192 if (idev != NULL) { 193 if (idev != NULL) {
193 rt->rt6i_idev = NULL; 194 rt->rt6i_idev = NULL;
194 in6_dev_put(idev); 195 in6_dev_put(idev);
195 } 196 }
197 if (peer) {
198 BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
199 rt->rt6i_peer = NULL;
200 inet_putpeer(peer);
201 }
202}
203
204void rt6_bind_peer(struct rt6_info *rt, int create)
205{
206 struct inet_peer *peer;
207
208 if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
209 return;
210
211 peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
212 if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
213 inet_putpeer(peer);
196} 214}
197 215
198static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, 216static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -558,11 +576,7 @@ struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
558{ 576{
559 struct flowi fl = { 577 struct flowi fl = {
560 .oif = oif, 578 .oif = oif,
561 .nl_u = { 579 .fl6_dst = *daddr,
562 .ip6_u = {
563 .daddr = *daddr,
564 },
565 },
566 }; 580 };
567 struct dst_entry *dst; 581 struct dst_entry *dst;
568 int flags = strict ? RT6_LOOKUP_F_IFACE : 0; 582 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
@@ -778,13 +792,9 @@ void ip6_route_input(struct sk_buff *skb)
778 int flags = RT6_LOOKUP_F_HAS_SADDR; 792 int flags = RT6_LOOKUP_F_HAS_SADDR;
779 struct flowi fl = { 793 struct flowi fl = {
780 .iif = skb->dev->ifindex, 794 .iif = skb->dev->ifindex,
781 .nl_u = { 795 .fl6_dst = iph->daddr,
782 .ip6_u = { 796 .fl6_src = iph->saddr,
783 .daddr = iph->daddr, 797 .fl6_flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
784 .saddr = iph->saddr,
785 .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
786 },
787 },
788 .mark = skb->mark, 798 .mark = skb->mark,
789 .proto = iph->nexthdr, 799 .proto = iph->nexthdr,
790 }; 800 };
@@ -1463,12 +1473,8 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1463 struct ip6rd_flowi rdfl = { 1473 struct ip6rd_flowi rdfl = {
1464 .fl = { 1474 .fl = {
1465 .oif = dev->ifindex, 1475 .oif = dev->ifindex,
1466 .nl_u = { 1476 .fl6_dst = *dest,
1467 .ip6_u = { 1477 .fl6_src = *src,
1468 .daddr = *dest,
1469 .saddr = *src,
1470 },
1471 },
1472 }, 1478 },
1473 }; 1479 };
1474 1480
@@ -2465,8 +2471,6 @@ static int ip6_route_dev_notify(struct notifier_block *this,
2465 2471
2466#ifdef CONFIG_PROC_FS 2472#ifdef CONFIG_PROC_FS
2467 2473
2468#define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
2469
2470struct rt6_proc_arg 2474struct rt6_proc_arg
2471{ 2475{
2472 char *buffer; 2476 char *buffer;
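
rt6_bind_peer() above attaches an inet_peer to a cached route at most once and without a lock: whoever loses the cmpxchg() race drops the reference it just looked up. The publish-or-put idiom in isolation, with hypothetical names:

#include <net/inetpeer.h>

/* Hypothetical one-shot binder: *slot is set exactly once; a racing caller
 * releases its own freshly taken reference instead. */
static void example_bind_once(struct inet_peer **slot, struct inet_peer *peer)
{
        if (peer && cmpxchg(slot, NULL, peer) != NULL)
                inet_putpeer(peer);
}
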
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8c4d00c7cd2..8ce38f10a54 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -731,10 +731,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
731 } 731 }
732 732
733 { 733 {
734 struct flowi fl = { .nl_u = { .ip4_u = 734 struct flowi fl = { .fl4_dst = dst,
735 { .daddr = dst, 735 .fl4_src = tiph->saddr,
736 .saddr = tiph->saddr, 736 .fl4_tos = RT_TOS(tos),
737 .tos = RT_TOS(tos) } },
738 .oif = tunnel->parms.link, 737 .oif = tunnel->parms.link,
739 .proto = IPPROTO_IPV6 }; 738 .proto = IPPROTO_IPV6 };
740 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 739 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
@@ -856,10 +855,9 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
856 iph = &tunnel->parms.iph; 855 iph = &tunnel->parms.iph;
857 856
858 if (iph->daddr) { 857 if (iph->daddr) {
859 struct flowi fl = { .nl_u = { .ip4_u = 858 struct flowi fl = { .fl4_dst = iph->daddr,
860 { .daddr = iph->daddr, 859 .fl4_src = iph->saddr,
861 .saddr = iph->saddr, 860 .fl4_tos = RT_TOS(iph->tos),
862 .tos = RT_TOS(iph->tos) } },
863 .oif = tunnel->parms.link, 861 .oif = tunnel->parms.link,
864 .proto = IPPROTO_IPV6 }; 862 .proto = IPPROTO_IPV6 };
865 struct rtable *rt; 863 struct rtable *rt;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7e41e2cbb85..319458558df 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -130,6 +130,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
130 struct ipv6_pinfo *np = inet6_sk(sk); 130 struct ipv6_pinfo *np = inet6_sk(sk);
131 struct tcp_sock *tp = tcp_sk(sk); 131 struct tcp_sock *tp = tcp_sk(sk);
132 struct in6_addr *saddr = NULL, *final_p, final; 132 struct in6_addr *saddr = NULL, *final_p, final;
133 struct rt6_info *rt;
133 struct flowi fl; 134 struct flowi fl;
134 struct dst_entry *dst; 135 struct dst_entry *dst;
135 int addr_type; 136 int addr_type;
@@ -280,6 +281,26 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
280 sk->sk_gso_type = SKB_GSO_TCPV6; 281 sk->sk_gso_type = SKB_GSO_TCPV6;
281 __ip6_dst_store(sk, dst, NULL, NULL); 282 __ip6_dst_store(sk, dst, NULL, NULL);
282 283
284 rt = (struct rt6_info *) dst;
285 if (tcp_death_row.sysctl_tw_recycle &&
286 !tp->rx_opt.ts_recent_stamp &&
287 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
288 struct inet_peer *peer = rt6_get_peer(rt);
289 /*
290 * VJ's idea. We save last timestamp seen from
291 * the destination in peer table, when entering state
292 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
293 * when trying new connection.
294 */
295 if (peer) {
296 inet_peer_refcheck(peer);
297 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
298 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
299 tp->rx_opt.ts_recent = peer->tcp_ts;
300 }
301 }
302 }
303
283 icsk->icsk_ext_hdr_len = 0; 304 icsk->icsk_ext_hdr_len = 0;
284 if (np->opt) 305 if (np->opt)
285 icsk->icsk_ext_hdr_len = (np->opt->opt_flen + 306 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
@@ -906,12 +927,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
906}; 927};
907#endif 928#endif
908 929
909static struct timewait_sock_ops tcp6_timewait_sock_ops = {
910 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
911 .twsk_unique = tcp_twsk_unique,
912 .twsk_destructor= tcp_twsk_destructor,
913};
914
915static void __tcp_v6_send_check(struct sk_buff *skb, 930static void __tcp_v6_send_check(struct sk_buff *skb,
916 struct in6_addr *saddr, struct in6_addr *daddr) 931 struct in6_addr *saddr, struct in6_addr *daddr)
917{ 932{
@@ -1176,6 +1191,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1176 struct ipv6_pinfo *np = inet6_sk(sk); 1191 struct ipv6_pinfo *np = inet6_sk(sk);
1177 struct tcp_sock *tp = tcp_sk(sk); 1192 struct tcp_sock *tp = tcp_sk(sk);
1178 __u32 isn = TCP_SKB_CB(skb)->when; 1193 __u32 isn = TCP_SKB_CB(skb)->when;
1194 struct dst_entry *dst = NULL;
1179#ifdef CONFIG_SYN_COOKIES 1195#ifdef CONFIG_SYN_COOKIES
1180 int want_cookie = 0; 1196 int want_cookie = 0;
1181#else 1197#else
@@ -1273,6 +1289,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1273 TCP_ECN_create_request(req, tcp_hdr(skb)); 1289 TCP_ECN_create_request(req, tcp_hdr(skb));
1274 1290
1275 if (!isn) { 1291 if (!isn) {
1292 struct inet_peer *peer = NULL;
1293
1276 if (ipv6_opt_accepted(sk, skb) || 1294 if (ipv6_opt_accepted(sk, skb) ||
1277 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 1295 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1278 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { 1296 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1285,13 +1303,57 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1285 if (!sk->sk_bound_dev_if && 1303 if (!sk->sk_bound_dev_if &&
1286 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) 1304 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1287 treq->iif = inet6_iif(skb); 1305 treq->iif = inet6_iif(skb);
1288 if (!want_cookie) { 1306
1289 isn = tcp_v6_init_sequence(skb); 1307 if (want_cookie) {
1290 } else {
1291 isn = cookie_v6_init_sequence(sk, skb, &req->mss); 1308 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1292 req->cookie_ts = tmp_opt.tstamp_ok; 1309 req->cookie_ts = tmp_opt.tstamp_ok;
1310 goto have_isn;
1293 } 1311 }
1312
1313 /* VJ's idea. We save last timestamp seen
1314 * from the destination in peer table, when entering
1315 * state TIME-WAIT, and check against it before
1316 * accepting new connection request.
1317 *
1318 * If "isn" is not zero, this request hit alive
1319 * timewait bucket, so that all the necessary checks
1320 * are made in the function processing timewait state.
1321 */
1322 if (tmp_opt.saw_tstamp &&
1323 tcp_death_row.sysctl_tw_recycle &&
1324 (dst = inet6_csk_route_req(sk, req)) != NULL &&
1325 (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1326 ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
1327 &treq->rmt_addr)) {
1328 inet_peer_refcheck(peer);
1329 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1330 (s32)(peer->tcp_ts - req->ts_recent) >
1331 TCP_PAWS_WINDOW) {
1332 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1333 goto drop_and_release;
1334 }
1335 }
1336 /* Kill the following clause, if you dislike this way. */
1337 else if (!sysctl_tcp_syncookies &&
1338 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1339 (sysctl_max_syn_backlog >> 2)) &&
1340 (!peer || !peer->tcp_ts_stamp) &&
1341 (!dst || !dst_metric(dst, RTAX_RTT))) {
1342 /* Without syncookies last quarter of
1343 * backlog is filled with destinations,
1344 * proven to be alive.
1345 * It means that we continue to communicate
1346 * to destinations, already remembered
1347 * to the moment of synflood.
1348 */
1349 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1350 &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1351 goto drop_and_release;
1352 }
1353
1354 isn = tcp_v6_init_sequence(skb);
1294 } 1355 }
1356have_isn:
1295 tcp_rsk(req)->snt_isn = isn; 1357 tcp_rsk(req)->snt_isn = isn;
1296 1358
1297 security_inet_conn_request(sk, skb, req); 1359 security_inet_conn_request(sk, skb, req);
@@ -1304,6 +1366,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1304 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1366 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1305 return 0; 1367 return 0;
1306 1368
1369drop_and_release:
1370 dst_release(dst);
1307drop_and_free: 1371drop_and_free:
1308 reqsk_free(req); 1372 reqsk_free(req);
1309drop: 1373drop:
@@ -1382,28 +1446,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1382 if (sk_acceptq_is_full(sk)) 1446 if (sk_acceptq_is_full(sk))
1383 goto out_overflow; 1447 goto out_overflow;
1384 1448
1385 if (dst == NULL) { 1449 if (!dst) {
1386 struct in6_addr *final_p, final; 1450 dst = inet6_csk_route_req(sk, req);
1387 struct flowi fl; 1451 if (!dst)
1388
1389 memset(&fl, 0, sizeof(fl));
1390 fl.proto = IPPROTO_TCP;
1391 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1392 final_p = fl6_update_dst(&fl, opt, &final);
1393 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1394 fl.oif = sk->sk_bound_dev_if;
1395 fl.mark = sk->sk_mark;
1396 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1397 fl.fl_ip_sport = inet_rsk(req)->loc_port;
1398 security_req_classify_flow(req, &fl);
1399
1400 if (ip6_dst_lookup(sk, &dst, &fl))
1401 goto out;
1402
1403 if (final_p)
1404 ipv6_addr_copy(&fl.fl6_dst, final_p);
1405
1406 if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
1407 goto out; 1452 goto out;
1408 } 1453 }
1409 1454
@@ -1818,19 +1863,51 @@ do_time_wait:
1818 goto discard_it; 1863 goto discard_it;
1819} 1864}
1820 1865
1821static int tcp_v6_remember_stamp(struct sock *sk) 1866static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1822{ 1867{
1823 /* Alas, not yet... */ 1868 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1824 return 0; 1869 struct ipv6_pinfo *np = inet6_sk(sk);
1870 struct inet_peer *peer;
1871
1872 if (!rt ||
1873 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1874 peer = inet_getpeer_v6(&np->daddr, 1);
1875 *release_it = true;
1876 } else {
1877 if (!rt->rt6i_peer)
1878 rt6_bind_peer(rt, 1);
1879 peer = rt->rt6i_peer;
 1880 *release_it = false;
1881 }
1882
1883 return peer;
1825} 1884}
1826 1885
1886static void *tcp_v6_tw_get_peer(struct sock *sk)
1887{
1888 struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1889 struct inet_timewait_sock *tw = inet_twsk(sk);
1890
1891 if (tw->tw_family == AF_INET)
1892 return tcp_v4_tw_get_peer(sk);
1893
1894 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1895}
1896
1897static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1898 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1899 .twsk_unique = tcp_twsk_unique,
1900 .twsk_destructor= tcp_twsk_destructor,
1901 .twsk_getpeer = tcp_v6_tw_get_peer,
1902};
1903
1827static const struct inet_connection_sock_af_ops ipv6_specific = { 1904static const struct inet_connection_sock_af_ops ipv6_specific = {
1828 .queue_xmit = inet6_csk_xmit, 1905 .queue_xmit = inet6_csk_xmit,
1829 .send_check = tcp_v6_send_check, 1906 .send_check = tcp_v6_send_check,
1830 .rebuild_header = inet6_sk_rebuild_header, 1907 .rebuild_header = inet6_sk_rebuild_header,
1831 .conn_request = tcp_v6_conn_request, 1908 .conn_request = tcp_v6_conn_request,
1832 .syn_recv_sock = tcp_v6_syn_recv_sock, 1909 .syn_recv_sock = tcp_v6_syn_recv_sock,
1833 .remember_stamp = tcp_v6_remember_stamp, 1910 .get_peer = tcp_v6_get_peer,
1834 .net_header_len = sizeof(struct ipv6hdr), 1911 .net_header_len = sizeof(struct ipv6hdr),
1835 .setsockopt = ipv6_setsockopt, 1912 .setsockopt = ipv6_setsockopt,
1836 .getsockopt = ipv6_getsockopt, 1913 .getsockopt = ipv6_getsockopt,
@@ -1862,7 +1939,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
1862 .rebuild_header = inet_sk_rebuild_header, 1939 .rebuild_header = inet_sk_rebuild_header,
1863 .conn_request = tcp_v6_conn_request, 1940 .conn_request = tcp_v6_conn_request,
1864 .syn_recv_sock = tcp_v6_syn_recv_sock, 1941 .syn_recv_sock = tcp_v6_syn_recv_sock,
1865 .remember_stamp = tcp_v4_remember_stamp, 1942 .get_peer = tcp_v4_get_peer,
1866 .net_header_len = sizeof(struct iphdr), 1943 .net_header_len = sizeof(struct iphdr),
1867 .setsockopt = ipv6_setsockopt, 1944 .setsockopt = ipv6_setsockopt,
1868 .getsockopt = ipv6_getsockopt, 1945 .getsockopt = ipv6_getsockopt,
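The release_it flag returned by the new get_peer hooks tells the caller whether it owns a reference on the inet_peer: a peer looked up directly via inet_getpeer_v6()/inet_getpeer_v4() must be dropped with inet_putpeer(), while a peer borrowed from the cached route is owned by the route. A minimal sketch of the calling pattern (the caller itself is invented for illustration):

    #include <net/inet_connection_sock.h>
    #include <net/inetpeer.h>

    /* Illustrative caller of the icsk_af_ops->get_peer hook added above. */
    static void example_touch_peer(struct sock *sk)
    {
            bool release_it;
            struct inet_peer *peer;

            peer = inet_csk(sk)->icsk_af_ops->get_peer(sk, &release_it);
            if (!peer)
                    return;

            /* ... read or update per-destination metrics on 'peer' ... */

            if (release_it)
                    inet_putpeer(peer);     /* we took our own reference */
            /* else: the cached route owns the peer, nothing to drop */
    }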
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 91def93bec8..b541a4e009f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -227,7 +227,7 @@ begin:
227 227
228 if (result) { 228 if (result) {
229exact_match: 229exact_match:
230 if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) 230 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
231 result = NULL; 231 result = NULL;
232 else if (unlikely(compute_score2(result, net, saddr, sport, 232 else if (unlikely(compute_score2(result, net, saddr, sport,
233 daddr, hnum, dif) < badness)) { 233 daddr, hnum, dif) < badness)) {
@@ -294,7 +294,7 @@ begin:
294 goto begin; 294 goto begin;
295 295
296 if (result) { 296 if (result) {
297 if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) 297 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
298 result = NULL; 298 result = NULL;
299 else if (unlikely(compute_score(result, net, hnum, saddr, sport, 299 else if (unlikely(compute_score(result, net, hnum, saddr, sport,
300 daddr, dport, dif) < badness)) { 300 daddr, dport, dif) < badness)) {
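atomic_inc_not_zero_hint() behaves like atomic_inc_not_zero(), but starts its compare-and-swap loop from a caller-supplied expected value (2 here, the usual refcount of an established UDP socket), so the common case succeeds on the first cmpxchg instead of needing an extra read. A simplified, self-contained model of the idea (not the kernel implementation):

    #include <stdio.h>

    /* Model of "increment if not zero, with an expected-value hint". */
    static int inc_not_zero_hint(int *v, int hint)
    {
            int c = hint ? hint : *v;

            while (c) {
                    int old = __sync_val_compare_and_swap(v, c, c + 1);
                    if (old == c)
                            return 1;       /* incremented */
                    c = old;                /* retry with the value we saw */
            }
            return 0;                       /* was zero: take no reference */
    }

    int main(void)
    {
            int refcnt = 2;

            printf("taken=%d refcnt=%d\n", inc_not_zero_hint(&refcnt, 2), refcnt);
            refcnt = 0;
            printf("taken=%d refcnt=%d\n", inc_not_zero_hint(&refcnt, 2), refcnt);
            return 0;
    }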
diff --git a/net/irda/ircomm/Makefile b/net/irda/ircomm/Makefile
index 48689458c08..ab23b5ba7e3 100644
--- a/net/irda/ircomm/Makefile
+++ b/net/irda/ircomm/Makefile
@@ -4,5 +4,5 @@
4 4
5obj-$(CONFIG_IRCOMM) += ircomm.o ircomm-tty.o 5obj-$(CONFIG_IRCOMM) += ircomm.o ircomm-tty.o
6 6
7ircomm-objs := ircomm_core.o ircomm_event.o ircomm_lmp.o ircomm_ttp.o 7ircomm-y := ircomm_core.o ircomm_event.o ircomm_lmp.o ircomm_ttp.o
8ircomm-tty-objs := ircomm_tty.o ircomm_tty_attach.o ircomm_tty_ioctl.o ircomm_param.o 8ircomm-tty-y := ircomm_tty.o ircomm_tty_attach.o ircomm_tty_ioctl.o ircomm_param.o
diff --git a/net/irda/irlan/Makefile b/net/irda/irlan/Makefile
index 77549bc8641..94eefbc8e6b 100644
--- a/net/irda/irlan/Makefile
+++ b/net/irda/irlan/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_IRLAN) += irlan.o 5obj-$(CONFIG_IRLAN) += irlan.o
6 6
7irlan-objs := irlan_common.o irlan_eth.o irlan_event.o irlan_client.o irlan_provider.o irlan_filter.o irlan_provider_event.o irlan_client_event.o 7irlan-y := irlan_common.o irlan_eth.o irlan_event.o irlan_client.o irlan_provider.o irlan_filter.o irlan_provider_event.o irlan_client_event.o
diff --git a/net/irda/irnet/Makefile b/net/irda/irnet/Makefile
index b3ee01e0def..61c365c8a2a 100644
--- a/net/irda/irnet/Makefile
+++ b/net/irda/irnet/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_IRNET) += irnet.o 5obj-$(CONFIG_IRNET) += irnet.o
6 6
7irnet-objs := irnet_ppp.o irnet_irda.o 7irnet-y := irnet_ppp.o irnet_irda.o
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 522e219f355..110efb704c9 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -476,15 +476,13 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
476 476
477 { 477 {
478 struct flowi fl = { .oif = sk->sk_bound_dev_if, 478 struct flowi fl = { .oif = sk->sk_bound_dev_if,
479 .nl_u = { .ip4_u = { 479 .fl4_dst = daddr,
480 .daddr = daddr, 480 .fl4_src = inet->inet_saddr,
481 .saddr = inet->inet_saddr, 481 .fl4_tos = RT_CONN_FLAGS(sk),
482 .tos = RT_CONN_FLAGS(sk) } },
483 .proto = sk->sk_protocol, 482 .proto = sk->sk_protocol,
484 .flags = inet_sk_flowi_flags(sk), 483 .flags = inet_sk_flowi_flags(sk),
485 .uli_u = { .ports = { 484 .fl_ip_sport = inet->inet_sport,
486 .sport = inet->inet_sport, 485 .fl_ip_dport = inet->inet_dport };
487 .dport = inet->inet_dport } } };
488 486
489 /* If this fails, retransmit mechanism of transport layer will 487 /* If this fails, retransmit mechanism of transport layer will
490 * keep trying until route appears or the connection times 488 * keep trying until route appears or the connection times
diff --git a/net/lapb/Makefile b/net/lapb/Makefile
index 53f7c90db16..fff797dfc88 100644
--- a/net/lapb/Makefile
+++ b/net/lapb/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_LAPB) += lapb.o 5obj-$(CONFIG_LAPB) += lapb.o
6 6
7lapb-objs := lapb_in.o lapb_out.o lapb_subr.o lapb_timer.o lapb_iface.o 7lapb-y := lapb_in.o lapb_out.o lapb_subr.o lapb_timer.o lapb_iface.o
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index e35dbe55f52..dfd3a648a55 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -316,7 +316,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
316 if (unlikely(addr->sllc_family != AF_LLC)) 316 if (unlikely(addr->sllc_family != AF_LLC))
317 goto out; 317 goto out;
318 rc = -ENODEV; 318 rc = -ENODEV;
319 rtnl_lock();
320 rcu_read_lock(); 319 rcu_read_lock();
321 if (sk->sk_bound_dev_if) { 320 if (sk->sk_bound_dev_if) {
322 llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); 321 llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
@@ -334,10 +333,11 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
334 } 333 }
335 } 334 }
336 } else 335 } else
337 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd, 336 llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd,
338 addr->sllc_mac); 337 addr->sllc_mac);
338 if (llc->dev)
339 dev_hold(llc->dev);
339 rcu_read_unlock(); 340 rcu_read_unlock();
340 rtnl_unlock();
341 if (!llc->dev) 341 if (!llc->dev)
342 goto out; 342 goto out;
343 if (!addr->sllc_sap) { 343 if (!addr->sllc_sap) {
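The bind path now relies on the usual RCU device-lookup pattern instead of rtnl_lock(): resolve the device inside the read-side section and pin it with dev_hold() before rcu_read_unlock(). The same pattern in isolation (this is essentially what dev_get_by_index() does internally):

    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>

    static struct net_device *example_get_dev(struct net *net, int ifindex)
    {
            struct net_device *dev;

            rcu_read_lock();
            dev = dev_get_by_index_rcu(net, ifindex);
            if (dev)
                    dev_hold(dev);          /* reference survives the RCU section */
            rcu_read_unlock();

            return dev;                     /* caller drops it with dev_put() */
    }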
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index d2b03e0851e..4bd6ef0be38 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -147,6 +147,5 @@ struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
147 147
148void ieee80211_aes_key_free(struct crypto_cipher *tfm) 148void ieee80211_aes_key_free(struct crypto_cipher *tfm)
149{ 149{
150 if (tfm) 150 crypto_free_cipher(tfm);
151 crypto_free_cipher(tfm);
152} 151}
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index b4d66cca76d..d502b2684a6 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -128,6 +128,5 @@ struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[])
128 128
129void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm) 129void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm)
130{ 130{
131 if (tfm) 131 crypto_free_cipher(tfm);
132 crypto_free_cipher(tfm);
133} 132}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 18bd0e55060..0c544074479 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1299,6 +1299,13 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1299 struct ieee80211_local *local = wiphy_priv(wiphy); 1299 struct ieee80211_local *local = wiphy_priv(wiphy);
1300 int err; 1300 int err;
1301 1301
1302 if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
1303 err = drv_set_frag_threshold(local, wiphy->frag_threshold);
1304
1305 if (err)
1306 return err;
1307 }
1308
1302 if (changed & WIPHY_PARAM_COVERAGE_CLASS) { 1309 if (changed & WIPHY_PARAM_COVERAGE_CLASS) {
1303 err = drv_set_coverage_class(local, wiphy->coverage_class); 1310 err = drv_set_coverage_class(local, wiphy->coverage_class);
1304 1311
@@ -1621,6 +1628,23 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
1621 ieee80211_queue_work(&local->hw, &local->reconfig_filter); 1628 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
1622} 1629}
1623 1630
1631static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
1632{
1633 struct ieee80211_local *local = wiphy_priv(wiphy);
1634
1635 if (local->started)
1636 return -EOPNOTSUPP;
1637
1638 return drv_set_antenna(local, tx_ant, rx_ant);
1639}
1640
1641static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
1642{
1643 struct ieee80211_local *local = wiphy_priv(wiphy);
1644
1645 return drv_get_antenna(local, tx_ant, rx_ant);
1646}
1647
1624struct cfg80211_ops mac80211_config_ops = { 1648struct cfg80211_ops mac80211_config_ops = {
1625 .add_virtual_intf = ieee80211_add_iface, 1649 .add_virtual_intf = ieee80211_add_iface,
1626 .del_virtual_intf = ieee80211_del_iface, 1650 .del_virtual_intf = ieee80211_del_iface,
@@ -1673,4 +1697,6 @@ struct cfg80211_ops mac80211_config_ops = {
1673 .mgmt_tx = ieee80211_mgmt_tx, 1697 .mgmt_tx = ieee80211_mgmt_tx,
1674 .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config, 1698 .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
1675 .mgmt_frame_register = ieee80211_mgmt_frame_register, 1699 .mgmt_frame_register = ieee80211_mgmt_frame_register,
1700 .set_antenna = ieee80211_set_antenna,
1701 .get_antenna = ieee80211_get_antenna,
1676}; 1702};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 18260aa99c5..1f02e599a31 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -21,16 +21,30 @@ int mac80211_open_file_generic(struct inode *inode, struct file *file)
21 return 0; 21 return 0;
22} 22}
23 23
24#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ 24#define DEBUGFS_FORMAT_BUFFER_SIZE 100
25
26int mac80211_format_buffer(char __user *userbuf, size_t count,
27 loff_t *ppos, char *fmt, ...)
28{
29 va_list args;
30 char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
31 int res;
32
33 va_start(args, fmt);
34 res = vscnprintf(buf, sizeof(buf), fmt, args);
35 va_end(args);
36
37 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
38}
39
40#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
25static ssize_t name## _read(struct file *file, char __user *userbuf, \ 41static ssize_t name## _read(struct file *file, char __user *userbuf, \
26 size_t count, loff_t *ppos) \ 42 size_t count, loff_t *ppos) \
27{ \ 43{ \
28 struct ieee80211_local *local = file->private_data; \ 44 struct ieee80211_local *local = file->private_data; \
29 char buf[buflen]; \
30 int res; \
31 \ 45 \
32 res = scnprintf(buf, buflen, fmt "\n", ##value); \ 46 return mac80211_format_buffer(userbuf, count, ppos, \
33 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ 47 fmt "\n", ##value); \
34} \ 48} \
35 \ 49 \
36static const struct file_operations name## _ops = { \ 50static const struct file_operations name## _ops = { \
@@ -46,13 +60,13 @@ static const struct file_operations name## _ops = { \
46 debugfs_create_file(#name, mode, phyd, local, &name## _ops); 60 debugfs_create_file(#name, mode, phyd, local, &name## _ops);
47 61
48 62
49DEBUGFS_READONLY_FILE(frequency, 20, "%d", 63DEBUGFS_READONLY_FILE(frequency, "%d",
50 local->hw.conf.channel->center_freq); 64 local->hw.conf.channel->center_freq);
51DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d", 65DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
52 local->total_ps_buffered); 66 local->total_ps_buffered);
53DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x", 67DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
54 local->wep_iv & 0xffffff); 68 local->wep_iv & 0xffffff);
55DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", 69DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
56 local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver"); 70 local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
57 71
58static ssize_t tsf_read(struct file *file, char __user *user_buf, 72static ssize_t tsf_read(struct file *file, char __user *user_buf,
@@ -60,13 +74,11 @@ static ssize_t tsf_read(struct file *file, char __user *user_buf,
60{ 74{
61 struct ieee80211_local *local = file->private_data; 75 struct ieee80211_local *local = file->private_data;
62 u64 tsf; 76 u64 tsf;
63 char buf[100];
64 77
65 tsf = drv_get_tsf(local); 78 tsf = drv_get_tsf(local);
66 79
67 snprintf(buf, sizeof(buf), "0x%016llx\n", (unsigned long long) tsf); 80 return mac80211_format_buffer(user_buf, count, ppos, "0x%016llx\n",
68 81 (unsigned long long) tsf);
69 return simple_read_from_buffer(user_buf, count, ppos, buf, 19);
70} 82}
71 83
72static ssize_t tsf_write(struct file *file, 84static ssize_t tsf_write(struct file *file,
@@ -131,12 +143,9 @@ static ssize_t noack_read(struct file *file, char __user *user_buf,
131 size_t count, loff_t *ppos) 143 size_t count, loff_t *ppos)
132{ 144{
133 struct ieee80211_local *local = file->private_data; 145 struct ieee80211_local *local = file->private_data;
134 int res;
135 char buf[10];
136 146
137 res = scnprintf(buf, sizeof(buf), "%d\n", local->wifi_wme_noack_test); 147 return mac80211_format_buffer(user_buf, count, ppos, "%d\n",
138 148 local->wifi_wme_noack_test);
139 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
140} 149}
141 150
142static ssize_t noack_write(struct file *file, 151static ssize_t noack_write(struct file *file,
@@ -168,12 +177,8 @@ static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
168 size_t count, loff_t *ppos) 177 size_t count, loff_t *ppos)
169{ 178{
170 struct ieee80211_local *local = file->private_data; 179 struct ieee80211_local *local = file->private_data;
171 int res; 180 return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
172 char buf[10]; 181 local->uapsd_queues);
173
174 res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_queues);
175
176 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
177} 182}
178 183
179static ssize_t uapsd_queues_write(struct file *file, 184static ssize_t uapsd_queues_write(struct file *file,
@@ -215,12 +220,9 @@ static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
215 size_t count, loff_t *ppos) 220 size_t count, loff_t *ppos)
216{ 221{
217 struct ieee80211_local *local = file->private_data; 222 struct ieee80211_local *local = file->private_data;
218 int res;
219 char buf[10];
220 223
221 res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_max_sp_len); 224 return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
222 225 local->uapsd_max_sp_len);
223 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
224} 226}
225 227
226static ssize_t uapsd_max_sp_len_write(struct file *file, 228static ssize_t uapsd_max_sp_len_write(struct file *file,
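With mac80211_format_buffer() in place, a read-only debugfs file no longer needs its own stack buffer; a new handler (the file shown here is hypothetical) reduces to a single call:

    static ssize_t example_queues_read(struct file *file, char __user *user_buf,
                                       size_t count, loff_t *ppos)
    {
            struct ieee80211_local *local = file->private_data;

            return mac80211_format_buffer(user_buf, count, ppos, "%d\n",
                                          local->hw.queues);
    }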
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h
index 09cc9be3479..7c87529630f 100644
--- a/net/mac80211/debugfs.h
+++ b/net/mac80211/debugfs.h
@@ -4,6 +4,8 @@
4#ifdef CONFIG_MAC80211_DEBUGFS 4#ifdef CONFIG_MAC80211_DEBUGFS
5extern void debugfs_hw_add(struct ieee80211_local *local); 5extern void debugfs_hw_add(struct ieee80211_local *local);
6extern int mac80211_open_file_generic(struct inode *inode, struct file *file); 6extern int mac80211_open_file_generic(struct inode *inode, struct file *file);
7extern int mac80211_format_buffer(char __user *userbuf, size_t count,
8 loff_t *ppos, char *fmt, ...);
7#else 9#else
8static inline void debugfs_hw_add(struct ieee80211_local *local) 10static inline void debugfs_hw_add(struct ieee80211_local *local)
9{ 11{
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 1243d1db5c5..5822a6ce767 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -15,18 +15,17 @@
15#include "debugfs.h" 15#include "debugfs.h"
16#include "debugfs_key.h" 16#include "debugfs_key.h"
17 17
18#define KEY_READ(name, prop, buflen, format_string) \ 18#define KEY_READ(name, prop, format_string) \
19static ssize_t key_##name##_read(struct file *file, \ 19static ssize_t key_##name##_read(struct file *file, \
20 char __user *userbuf, \ 20 char __user *userbuf, \
21 size_t count, loff_t *ppos) \ 21 size_t count, loff_t *ppos) \
22{ \ 22{ \
23 char buf[buflen]; \
24 struct ieee80211_key *key = file->private_data; \ 23 struct ieee80211_key *key = file->private_data; \
25 int res = scnprintf(buf, buflen, format_string, key->prop); \ 24 return mac80211_format_buffer(userbuf, count, ppos, \
26 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ 25 format_string, key->prop); \
27} 26}
28#define KEY_READ_D(name) KEY_READ(name, name, 20, "%d\n") 27#define KEY_READ_D(name) KEY_READ(name, name, "%d\n")
29#define KEY_READ_X(name) KEY_READ(name, name, 20, "0x%x\n") 28#define KEY_READ_X(name) KEY_READ(name, name, "0x%x\n")
30 29
31#define KEY_OPS(name) \ 30#define KEY_OPS(name) \
32static const struct file_operations key_ ##name## _ops = { \ 31static const struct file_operations key_ ##name## _ops = { \
@@ -39,9 +38,9 @@ static const struct file_operations key_ ##name## _ops = { \
39 KEY_READ_##format(name) \ 38 KEY_READ_##format(name) \
40 KEY_OPS(name) 39 KEY_OPS(name)
41 40
42#define KEY_CONF_READ(name, buflen, format_string) \ 41#define KEY_CONF_READ(name, format_string) \
43 KEY_READ(conf_##name, conf.name, buflen, format_string) 42 KEY_READ(conf_##name, conf.name, format_string)
44#define KEY_CONF_READ_D(name) KEY_CONF_READ(name, 20, "%d\n") 43#define KEY_CONF_READ_D(name) KEY_CONF_READ(name, "%d\n")
45 44
46#define KEY_CONF_OPS(name) \ 45#define KEY_CONF_OPS(name) \
47static const struct file_operations key_ ##name## _ops = { \ 46static const struct file_operations key_ ##name## _ops = { \
@@ -59,7 +58,7 @@ KEY_CONF_FILE(keyidx, D);
59KEY_CONF_FILE(hw_key_idx, D); 58KEY_CONF_FILE(hw_key_idx, D);
60KEY_FILE(flags, X); 59KEY_FILE(flags, X);
61KEY_FILE(tx_rx_count, D); 60KEY_FILE(tx_rx_count, D);
62KEY_READ(ifindex, sdata->name, IFNAMSIZ + 2, "%s\n"); 61KEY_READ(ifindex, sdata->name, "%s\n");
63KEY_OPS(ifindex); 62KEY_OPS(ifindex);
64 63
65static ssize_t key_algorithm_read(struct file *file, 64static ssize_t key_algorithm_read(struct file *file,
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 4601fea1784..f0fce37f406 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -17,20 +17,18 @@
17 17
 18/* sta attributes */ 18/* sta attributes */
19 19
20#define STA_READ(name, buflen, field, format_string) \ 20#define STA_READ(name, field, format_string) \
21static ssize_t sta_ ##name## _read(struct file *file, \ 21static ssize_t sta_ ##name## _read(struct file *file, \
22 char __user *userbuf, \ 22 char __user *userbuf, \
23 size_t count, loff_t *ppos) \ 23 size_t count, loff_t *ppos) \
24{ \ 24{ \
25 int res; \
26 struct sta_info *sta = file->private_data; \ 25 struct sta_info *sta = file->private_data; \
27 char buf[buflen]; \ 26 return mac80211_format_buffer(userbuf, count, ppos, \
28 res = scnprintf(buf, buflen, format_string, sta->field); \ 27 format_string, sta->field); \
29 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
30} 28}
31#define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n") 29#define STA_READ_D(name, field) STA_READ(name, field, "%d\n")
32#define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n") 30#define STA_READ_U(name, field) STA_READ(name, field, "%u\n")
33#define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") 31#define STA_READ_S(name, field) STA_READ(name, field, "%s\n")
34 32
35#define STA_OPS(name) \ 33#define STA_OPS(name) \
36static const struct file_operations sta_ ##name## _ops = { \ 34static const struct file_operations sta_ ##name## _ops = { \
@@ -79,22 +77,18 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
79 char __user *userbuf, 77 char __user *userbuf,
80 size_t count, loff_t *ppos) 78 size_t count, loff_t *ppos)
81{ 79{
82 char buf[20];
83 struct sta_info *sta = file->private_data; 80 struct sta_info *sta = file->private_data;
84 int res = scnprintf(buf, sizeof(buf), "%u\n", 81 return mac80211_format_buffer(userbuf, count, ppos, "%u\n",
85 skb_queue_len(&sta->ps_tx_buf)); 82 skb_queue_len(&sta->ps_tx_buf));
86 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
87} 83}
88STA_OPS(num_ps_buf_frames); 84STA_OPS(num_ps_buf_frames);
89 85
90static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf, 86static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
91 size_t count, loff_t *ppos) 87 size_t count, loff_t *ppos)
92{ 88{
93 char buf[20];
94 struct sta_info *sta = file->private_data; 89 struct sta_info *sta = file->private_data;
95 int res = scnprintf(buf, sizeof(buf), "%d\n", 90 return mac80211_format_buffer(userbuf, count, ppos, "%d\n",
96 jiffies_to_msecs(jiffies - sta->last_rx)); 91 jiffies_to_msecs(jiffies - sta->last_rx));
97 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
98} 92}
99STA_OPS(inactive_ms); 93STA_OPS(inactive_ms);
100 94
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 16983825f8e..4244554d218 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -233,6 +233,20 @@ static inline void drv_get_tkip_seq(struct ieee80211_local *local,
233 trace_drv_get_tkip_seq(local, hw_key_idx, iv32, iv16); 233 trace_drv_get_tkip_seq(local, hw_key_idx, iv32, iv16);
234} 234}
235 235
236static inline int drv_set_frag_threshold(struct ieee80211_local *local,
237 u32 value)
238{
239 int ret = 0;
240
241 might_sleep();
242
243 trace_drv_set_frag_threshold(local, value);
244 if (local->ops->set_frag_threshold)
245 ret = local->ops->set_frag_threshold(&local->hw, value);
246 trace_drv_return_int(local, ret);
247 return ret;
248}
249
236static inline int drv_set_rts_threshold(struct ieee80211_local *local, 250static inline int drv_set_rts_threshold(struct ieee80211_local *local,
237 u32 value) 251 u32 value)
238{ 252{
@@ -428,4 +442,27 @@ static inline void drv_channel_switch(struct ieee80211_local *local,
428 trace_drv_return_void(local); 442 trace_drv_return_void(local);
429} 443}
430 444
445
446static inline int drv_set_antenna(struct ieee80211_local *local,
447 u32 tx_ant, u32 rx_ant)
448{
449 int ret = -EOPNOTSUPP;
450 might_sleep();
451 if (local->ops->set_antenna)
452 ret = local->ops->set_antenna(&local->hw, tx_ant, rx_ant);
453 trace_drv_set_antenna(local, tx_ant, rx_ant, ret);
454 return ret;
455}
456
457static inline int drv_get_antenna(struct ieee80211_local *local,
458 u32 *tx_ant, u32 *rx_ant)
459{
460 int ret = -EOPNOTSUPP;
461 might_sleep();
462 if (local->ops->get_antenna)
463 ret = local->ops->get_antenna(&local->hw, tx_ant, rx_ant);
464 trace_drv_get_antenna(local, *tx_ant, *rx_ant, ret);
465 return ret;
466}
467
431#endif /* __MAC80211_DRIVER_OPS */ 468#endif /* __MAC80211_DRIVER_OPS */
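These wrappers only forward to optional struct ieee80211_ops members (assumed to be added to mac80211.h elsewhere in this series), so a driver opts in simply by filling the callbacks; drivers that leave them NULL keep the -EOPNOTSUPP / no-op behaviour seen above. A minimal sketch, with invented driver names and state:

    #include <net/mac80211.h>

    struct example_priv {
            u32 tx_ant, rx_ant;             /* illustrative driver state */
    };

    static int example_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
    {
            struct example_priv *priv = hw->priv;

            priv->tx_ant = tx_ant;
            priv->rx_ant = rx_ant;
            /* program the radio's antenna chains here */
            return 0;
    }

    static int example_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
    {
            struct example_priv *priv = hw->priv;

            *tx_ant = priv->tx_ant;
            *rx_ant = priv->rx_ant;
            return 0;
    }

    static const struct ieee80211_ops example_ops = {
            /* mandatory callbacks (tx, start, stop, ...) omitted */
            .set_antenna    = example_set_antenna,
            .get_antenna    = example_get_antenna,
    };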
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 6831fb1641c..c2772f23ac9 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -531,6 +531,27 @@ TRACE_EVENT(drv_get_tkip_seq,
531 ) 531 )
532); 532);
533 533
534TRACE_EVENT(drv_set_frag_threshold,
535 TP_PROTO(struct ieee80211_local *local, u32 value),
536
537 TP_ARGS(local, value),
538
539 TP_STRUCT__entry(
540 LOCAL_ENTRY
541 __field(u32, value)
542 ),
543
544 TP_fast_assign(
545 LOCAL_ASSIGN;
546 __entry->value = value;
547 ),
548
549 TP_printk(
550 LOCAL_PR_FMT " value:%d",
551 LOCAL_PR_ARG, __entry->value
552 )
553);
554
534TRACE_EVENT(drv_set_rts_threshold, 555TRACE_EVENT(drv_set_rts_threshold,
535 TP_PROTO(struct ieee80211_local *local, u32 value), 556 TP_PROTO(struct ieee80211_local *local, u32 value),
536 557
@@ -862,6 +883,56 @@ TRACE_EVENT(drv_channel_switch,
862 ) 883 )
863); 884);
864 885
886TRACE_EVENT(drv_set_antenna,
887 TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret),
888
889 TP_ARGS(local, tx_ant, rx_ant, ret),
890
891 TP_STRUCT__entry(
892 LOCAL_ENTRY
893 __field(u32, tx_ant)
894 __field(u32, rx_ant)
895 __field(int, ret)
896 ),
897
898 TP_fast_assign(
899 LOCAL_ASSIGN;
900 __entry->tx_ant = tx_ant;
901 __entry->rx_ant = rx_ant;
902 __entry->ret = ret;
903 ),
904
905 TP_printk(
906 LOCAL_PR_FMT " tx_ant:%d rx_ant:%d ret:%d",
907 LOCAL_PR_ARG, __entry->tx_ant, __entry->rx_ant, __entry->ret
908 )
909);
910
911TRACE_EVENT(drv_get_antenna,
912 TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret),
913
914 TP_ARGS(local, tx_ant, rx_ant, ret),
915
916 TP_STRUCT__entry(
917 LOCAL_ENTRY
918 __field(u32, tx_ant)
919 __field(u32, rx_ant)
920 __field(int, ret)
921 ),
922
923 TP_fast_assign(
924 LOCAL_ASSIGN;
925 __entry->tx_ant = tx_ant;
926 __entry->rx_ant = rx_ant;
927 __entry->ret = ret;
928 ),
929
930 TP_printk(
931 LOCAL_PR_FMT " tx_ant:%d rx_ant:%d ret:%d",
932 LOCAL_PR_ARG, __entry->tx_ant, __entry->rx_ant, __entry->ret
933 )
934);
935
865/* 936/*
866 * Tracing for API calls that drivers call. 937 * Tracing for API calls that drivers call.
867 */ 938 */
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 239c4836a94..410d104b134 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -915,6 +915,8 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
915 915
916 sdata->u.ibss.privacy = params->privacy; 916 sdata->u.ibss.privacy = params->privacy;
917 sdata->u.ibss.basic_rates = params->basic_rates; 917 sdata->u.ibss.basic_rates = params->basic_rates;
918 memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
919 sizeof(params->mcast_rate));
918 920
919 sdata->vif.bss_conf.beacon_int = params->beacon_interval; 921 sdata->vif.bss_conf.beacon_int = params->beacon_interval;
920 922
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b80c3868992..5bc0745368f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -349,6 +349,7 @@ struct ieee80211_if_managed {
349 struct work_struct chswitch_work; 349 struct work_struct chswitch_work;
350 struct work_struct beacon_connection_loss_work; 350 struct work_struct beacon_connection_loss_work;
351 351
352 unsigned long beacon_timeout;
352 unsigned long probe_timeout; 353 unsigned long probe_timeout;
353 int probe_send_count; 354 int probe_send_count;
354 355
@@ -1264,6 +1265,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
1264 int powersave); 1265 int powersave);
1265void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 1266void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1266 struct ieee80211_hdr *hdr); 1267 struct ieee80211_hdr *hdr);
1268void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
1269 struct ieee80211_hdr *hdr);
1267void ieee80211_beacon_connection_loss_work(struct work_struct *work); 1270void ieee80211_beacon_connection_loss_work(struct work_struct *work);
1268 1271
1269void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, 1272void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
@@ -1278,6 +1281,9 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
1278 struct sk_buff *skb); 1281 struct sk_buff *skb);
1279int ieee80211_add_pending_skbs(struct ieee80211_local *local, 1282int ieee80211_add_pending_skbs(struct ieee80211_local *local,
1280 struct sk_buff_head *skbs); 1283 struct sk_buff_head *skbs);
1284int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
1285 struct sk_buff_head *skbs,
1286 void (*fn)(void *data), void *data);
1281 1287
1282void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1288void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1283 u16 transaction, u16 auth_alg, 1289 u16 transaction, u16 auth_alg,
@@ -1287,6 +1293,10 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1287 const u8 *ie, size_t ie_len, 1293 const u8 *ie, size_t ie_len,
1288 enum ieee80211_band band, u32 rate_mask, 1294 enum ieee80211_band band, u32 rate_mask,
1289 u8 channel); 1295 u8 channel);
1296struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1297 u8 *dst,
1298 const u8 *ssid, size_t ssid_len,
1299 const u8 *ie, size_t ie_len);
1290void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1300void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1291 const u8 *ssid, size_t ssid_len, 1301 const u8 *ssid, size_t ssid_len,
1292 const u8 *ie, size_t ie_len); 1302 const u8 *ie, size_t ie_len);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ccd676b2f59..72df1ca7299 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -84,10 +84,17 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
84 goto out_unsupported; 84 goto out_unsupported;
85 85
86 sdata = key->sdata; 86 sdata = key->sdata;
87 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 87 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
88 /*
89 * The driver doesn't know anything about VLAN interfaces.
90 * Hence, don't send GTKs for VLAN interfaces to the driver.
91 */
92 if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
93 goto out_unsupported;
88 sdata = container_of(sdata->bss, 94 sdata = container_of(sdata->bss,
89 struct ieee80211_sub_if_data, 95 struct ieee80211_sub_if_data,
90 u.ap); 96 u.ap);
97 }
91 98
92 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); 99 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
93 100
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a3a9421555a..79480791494 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -28,13 +28,19 @@
28#include "rate.h" 28#include "rate.h"
29#include "led.h" 29#include "led.h"
30 30
31#define IEEE80211_MAX_NULLFUNC_TRIES 2
31#define IEEE80211_MAX_PROBE_TRIES 5 32#define IEEE80211_MAX_PROBE_TRIES 5
32 33
33/* 34/*
34 * beacon loss detection timeout 35 * Beacon loss timeout is calculated as N frames times the
35 * XXX: should depend on beacon interval 36 * advertised beacon interval. This may need to be somewhat
37 * higher than what hardware might detect to account for
38 * delays in the host processing frames. But since we also
39 * probe on beacon miss before declaring the connection lost
40 * default to what we want.
36 */ 41 */
37#define IEEE80211_BEACON_LOSS_TIME (2 * HZ) 42#define IEEE80211_BEACON_LOSS_COUNT 7
43
38/* 44/*
39 * Time the connection can be idle before we probe 45 * Time the connection can be idle before we probe
40 * it to see if we can still talk to the AP. 46 * it to see if we can still talk to the AP.
@@ -121,7 +127,7 @@ void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
121 return; 127 return;
122 128
123 mod_timer(&sdata->u.mgd.bcn_mon_timer, 129 mod_timer(&sdata->u.mgd.bcn_mon_timer,
124 round_jiffies_up(jiffies + IEEE80211_BEACON_LOSS_TIME)); 130 round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout));
125} 131}
126 132
127void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata) 133void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
@@ -871,6 +877,9 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
871 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 877 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
872 cbss->capability, bss->has_erp_value, bss->erp_value); 878 cbss->capability, bss->has_erp_value, bss->erp_value);
873 879
880 sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec(
881 IEEE80211_BEACON_LOSS_COUNT * bss_conf->beacon_int));
882
874 sdata->u.mgd.associated = cbss; 883 sdata->u.mgd.associated = cbss;
875 memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN); 884 memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
876 885
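A TU is 1024 µs, so with the common 100 TU beacon interval the monitor now fires after 7 * 100 * 1024 µs ≈ 717 ms rather than the old fixed 2 s. A self-contained check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            const unsigned long beacon_loss_count = 7;  /* IEEE80211_BEACON_LOSS_COUNT */
            const unsigned long beacon_int_tu = 100;    /* typical AP beacon interval */
            unsigned long usec = beacon_loss_count * beacon_int_tu * 1024; /* TU -> us */

            printf("beacon loss timeout: %lu us\n", usec);
            return 0;
    }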
@@ -1026,6 +1035,51 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1026 ieee80211_sta_reset_conn_monitor(sdata); 1035 ieee80211_sta_reset_conn_monitor(sdata);
1027} 1036}
1028 1037
1038static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
1039{
1040 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1041
1042 if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
1043 IEEE80211_STA_CONNECTION_POLL)))
1044 return;
1045
1046 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
1047 IEEE80211_STA_BEACON_POLL);
1048 mutex_lock(&sdata->local->iflist_mtx);
1049 ieee80211_recalc_ps(sdata->local, -1);
1050 mutex_unlock(&sdata->local->iflist_mtx);
1051
1052 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
1053 return;
1054
1055 /*
1056 * We've received a probe response, but are not sure whether
1057 * we have or will be receiving any beacons or data, so let's
1058 * schedule the timers again, just in case.
1059 */
1060 ieee80211_sta_reset_beacon_monitor(sdata);
1061
1062 mod_timer(&ifmgd->conn_mon_timer,
1063 round_jiffies_up(jiffies +
1064 IEEE80211_CONNECTION_IDLE_TIME));
1065}
1066
1067void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
1068 struct ieee80211_hdr *hdr)
1069{
1070 if (!ieee80211_is_data(hdr->frame_control) &&
1071 !ieee80211_is_nullfunc(hdr->frame_control))
1072 return;
1073
1074 ieee80211_sta_reset_conn_monitor(sdata);
1075
1076 if (ieee80211_is_nullfunc(hdr->frame_control) &&
1077 sdata->u.mgd.probe_send_count > 0) {
1078 sdata->u.mgd.probe_send_count = 0;
1079 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
1080 }
1081}
1082
1029static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) 1083static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1030{ 1084{
1031 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1085 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1041,8 +1095,19 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1041 if (ifmgd->probe_send_count >= unicast_limit) 1095 if (ifmgd->probe_send_count >= unicast_limit)
1042 dst = NULL; 1096 dst = NULL;
1043 1097
1044 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1098 /*
1045 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0); 1099 * When the hardware reports an accurate Tx ACK status, it's
1100 * better to send a nullfunc frame instead of a probe request,
1101 * as it will kick us off the AP quickly if we aren't associated
1102 * anymore. The timeout will be reset if the frame is ACKed by
1103 * the AP.
1104 */
1105 if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
1106 ieee80211_send_nullfunc(sdata->local, sdata, 0);
1107 else {
1108 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1109 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0);
1110 }
1046 1111
1047 ifmgd->probe_send_count++; 1112 ifmgd->probe_send_count++;
1048 ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT; 1113 ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT;
@@ -1108,6 +1173,30 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1108 mutex_unlock(&ifmgd->mtx); 1173 mutex_unlock(&ifmgd->mtx);
1109} 1174}
1110 1175
1176struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1177 struct ieee80211_vif *vif)
1178{
1179 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1180 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1181 struct sk_buff *skb;
1182 const u8 *ssid;
1183
1184 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
1185 return NULL;
1186
1187 ASSERT_MGD_MTX(ifmgd);
1188
1189 if (!ifmgd->associated)
1190 return NULL;
1191
1192 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1193 skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid,
1194 ssid + 2, ssid[1], NULL, 0);
1195
1196 return skb;
1197}
1198EXPORT_SYMBOL(ieee80211_ap_probereq_get);
1199
1111static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata) 1200static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1112{ 1201{
1113 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1202 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
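ieee80211_ap_probereq_get() gives drivers that monitor the connection in hardware or firmware the exact probe request mac80211 would send to the current AP; the returned skb belongs to the caller. A hedged sketch of a caller; both driver functions below are invented for illustration:

    /* Illustrative driver-side use of the new export. */
    static void example_fw_set_probe_template(void *drv_priv, const void *data,
                                              size_t len)
    {
            /* driver-specific: hand the frame to the firmware/offload engine */
    }

    static void example_config_connection_monitor(struct ieee80211_hw *hw,
                                                  struct ieee80211_vif *vif)
    {
            struct sk_buff *skb;

            skb = ieee80211_ap_probereq_get(hw, vif);
            if (!skb)
                    return;         /* not a station vif, or not associated */

            example_fw_set_probe_template(hw->priv, skb->data, skb->len);
            dev_kfree_skb(skb);
    }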
@@ -1485,29 +1574,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1485 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); 1574 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
1486 1575
1487 if (ifmgd->associated && 1576 if (ifmgd->associated &&
1488 memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0 && 1577 memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0)
1489 ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 1578 ieee80211_reset_ap_probe(sdata);
1490 IEEE80211_STA_CONNECTION_POLL)) {
1491 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
1492 IEEE80211_STA_BEACON_POLL);
1493 mutex_lock(&sdata->local->iflist_mtx);
1494 ieee80211_recalc_ps(sdata->local, -1);
1495 mutex_unlock(&sdata->local->iflist_mtx);
1496
1497 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
1498 return;
1499
1500 /*
1501 * We've received a probe response, but are not sure whether
1502 * we have or will be receiving any beacons or data, so let's
1503 * schedule the timers again, just in case.
1504 */
1505 ieee80211_sta_reset_beacon_monitor(sdata);
1506
1507 mod_timer(&ifmgd->conn_mon_timer,
1508 round_jiffies_up(jiffies +
1509 IEEE80211_CONNECTION_IDLE_TIME));
1510 }
1511} 1579}
1512 1580
1513/* 1581/*
@@ -1857,12 +1925,23 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
1857 IEEE80211_STA_CONNECTION_POLL) && 1925 IEEE80211_STA_CONNECTION_POLL) &&
1858 ifmgd->associated) { 1926 ifmgd->associated) {
1859 u8 bssid[ETH_ALEN]; 1927 u8 bssid[ETH_ALEN];
1928 int max_tries;
1860 1929
1861 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); 1930 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
1862 if (time_is_after_jiffies(ifmgd->probe_timeout)) 1931
1932 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
1933 max_tries = IEEE80211_MAX_NULLFUNC_TRIES;
1934 else
1935 max_tries = IEEE80211_MAX_PROBE_TRIES;
1936
1937 /* ACK received for nullfunc probing frame */
1938 if (!ifmgd->probe_send_count)
1939 ieee80211_reset_ap_probe(sdata);
1940
1941 else if (time_is_after_jiffies(ifmgd->probe_timeout))
1863 run_again(ifmgd, ifmgd->probe_timeout); 1942 run_again(ifmgd, ifmgd->probe_timeout);
1864 1943
1865 else if (ifmgd->probe_send_count < IEEE80211_MAX_PROBE_TRIES) { 1944 else if (ifmgd->probe_send_count < max_tries) {
1866#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1945#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1867 wiphy_debug(local->hw.wiphy, 1946 wiphy_debug(local->hw.wiphy,
1868 "%s: No probe response from AP %pM" 1947 "%s: No probe response from AP %pM"
@@ -1988,6 +2067,8 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
1988 add_timer(&ifmgd->timer); 2067 add_timer(&ifmgd->timer);
1989 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) 2068 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
1990 add_timer(&ifmgd->chswitch_timer); 2069 add_timer(&ifmgd->chswitch_timer);
2070 ieee80211_sta_reset_beacon_monitor(sdata);
2071 ieee80211_restart_sta_timer(sdata);
1991} 2072}
1992#endif 2073#endif
1993 2074
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 33f76993da0..3d5a2cb835c 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -211,7 +211,8 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
211 return (info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc); 211 return (info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc);
212} 212}
213 213
214static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx) 214static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
215 struct ieee80211_supported_band *sband)
215{ 216{
216 u8 i; 217 u8 i;
217 218
@@ -222,7 +223,7 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
222 if (basic_rates & (1 << *idx)) 223 if (basic_rates & (1 << *idx))
223 return; /* selected rate is a basic rate */ 224 return; /* selected rate is a basic rate */
224 225
225 for (i = *idx + 1; i <= max_rate_idx; i++) { 226 for (i = *idx + 1; i <= sband->n_bitrates; i++) {
226 if (basic_rates & (1 << i)) { 227 if (basic_rates & (1 << i)) {
227 *idx = i; 228 *idx = i;
228 return; 229 return;
@@ -237,16 +238,25 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
237 struct ieee80211_tx_rate_control *txrc) 238 struct ieee80211_tx_rate_control *txrc)
238{ 239{
239 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); 240 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
241 struct ieee80211_supported_band *sband = txrc->sband;
242 int mcast_rate;
240 243
241 if (!sta || !priv_sta || rc_no_data_or_no_ack(txrc)) { 244 if (!sta || !priv_sta || rc_no_data_or_no_ack(txrc)) {
242 info->control.rates[0].idx = rate_lowest_index(txrc->sband, sta); 245 info->control.rates[0].idx = rate_lowest_index(txrc->sband, sta);
243 info->control.rates[0].count = 246 info->control.rates[0].count =
244 (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 247 (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
245 1 : txrc->hw->max_rate_tries; 248 1 : txrc->hw->max_rate_tries;
246 if (!sta && txrc->ap) 249 if (!sta && txrc->bss) {
250 mcast_rate = txrc->bss_conf->mcast_rate[sband->band];
251 if (mcast_rate > 0) {
252 info->control.rates[0].idx = mcast_rate - 1;
253 return true;
254 }
255
247 rc_send_low_broadcast(&info->control.rates[0].idx, 256 rc_send_low_broadcast(&info->control.rates[0].idx,
248 txrc->bss_conf->basic_rates, 257 txrc->bss_conf->basic_rates,
249 txrc->sband->n_bitrates); 258 sband);
259 }
250 return true; 260 return true;
251 } 261 }
252 return false; 262 return false;
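bss_conf->mcast_rate[] stores a per-band bitrate index biased by one so that 0 can mean "not configured", which is why the selected index is mcast_rate - 1. The convention in isolation:

    /* Sketch of the "index + 1, 0 = unset" convention used by mcast_rate[]. */
    static int example_pick_mcast_idx(const int *mcast_rate, int band,
                                      int fallback_idx)
    {
            int val = mcast_rate[band];

            return val > 0 ? val - 1 : fallback_idx;    /* 0: not configured */
    }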
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 2a18d6602d4..4ad7a362fcc 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -371,6 +371,9 @@ minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, stru
371 if (likely(sta->ampdu_mlme.tid_tx[tid])) 371 if (likely(sta->ampdu_mlme.tid_tx[tid]))
372 return; 372 return;
373 373
374 if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
375 return;
376
374 ieee80211_start_tx_ba_session(pubsta, tid); 377 ieee80211_start_tx_ba_session(pubsta, tid);
375} 378}
376 379
@@ -407,8 +410,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
407 mi->ampdu_len += info->status.ampdu_len; 410 mi->ampdu_len += info->status.ampdu_len;
408 411
409 if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) { 412 if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
410 mi->sample_wait = 4 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len); 413 mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
411 mi->sample_tries = 3; 414 mi->sample_tries = 2;
412 mi->sample_count--; 415 mi->sample_count--;
413 } 416 }
414 417
@@ -506,7 +509,9 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
506 if (!mr->retry_updated) 509 if (!mr->retry_updated)
507 minstrel_calc_retransmit(mp, mi, index); 510 minstrel_calc_retransmit(mp, mi, index);
508 511
509 if (mr->probability < MINSTREL_FRAC(20, 100)) 512 if (sample)
513 rate->count = 1;
514 else if (mr->probability < MINSTREL_FRAC(20, 100))
510 rate->count = 2; 515 rate->count = 2;
511 else if (rtscts) 516 else if (rtscts)
512 rate->count = mr->retry_count_rtscts; 517 rate->count = mr->retry_count_rtscts;
@@ -562,7 +567,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
562 */ 567 */
563 if (minstrel_get_duration(sample_idx) > 568 if (minstrel_get_duration(sample_idx) >
564 minstrel_get_duration(mi->max_tp_rate)) { 569 minstrel_get_duration(mi->max_tp_rate)) {
565 if (mr->sample_skipped < 10) 570 if (mr->sample_skipped < 20)
566 goto next; 571 goto next;
567 572
568 if (mi->sample_slow++ > 2) 573 if (mi->sample_slow++ > 2)
@@ -586,6 +591,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
586 struct minstrel_ht_sta *mi = &msp->ht; 591 struct minstrel_ht_sta *mi = &msp->ht;
587 struct minstrel_priv *mp = priv; 592 struct minstrel_priv *mp = priv;
588 int sample_idx; 593 int sample_idx;
594 bool sample = false;
589 595
590 if (rate_control_send_low(sta, priv_sta, txrc)) 596 if (rate_control_send_low(sta, priv_sta, txrc))
591 return; 597 return;
@@ -596,10 +602,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
596 info->flags |= mi->tx_flags; 602 info->flags |= mi->tx_flags;
597 sample_idx = minstrel_get_sample_rate(mp, mi); 603 sample_idx = minstrel_get_sample_rate(mp, mi);
598 if (sample_idx >= 0) { 604 if (sample_idx >= 0) {
605 sample = true;
599 minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx, 606 minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
600 txrc, true, false); 607 txrc, true, false);
601 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate, 608 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
602 txrc, false, true); 609 txrc, false, false);
603 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; 610 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
604 } else { 611 } else {
605 minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate, 612 minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
@@ -607,7 +614,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
607 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2, 614 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
608 txrc, false, true); 615 txrc, false, true);
609 } 616 }
610 minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, true); 617 minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, !sample);
611 618
612 ar[3].count = 0; 619 ar[3].count = 0;
613 ar[3].idx = -1; 620 ar[3].idx = -1;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 54fb4a0e76f..55337709de4 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1102,8 +1102,6 @@ static void ap_sta_ps_end(struct sta_info *sta)
1102 1102
1103 atomic_dec(&sdata->bss->num_sta_ps); 1103 atomic_dec(&sdata->bss->num_sta_ps);
1104 1104
1105 clear_sta_flags(sta, WLAN_STA_PS_STA);
1106
1107#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1105#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1108 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", 1106 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1109 sdata->name, sta->sta.addr, sta->sta.aid); 1107 sdata->name, sta->sta.addr, sta->sta.aid);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 6d8f897d876..eff58571fd7 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -199,8 +199,11 @@ static void sta_unblock(struct work_struct *wk)
199 199
200 if (!test_sta_flags(sta, WLAN_STA_PS_STA)) 200 if (!test_sta_flags(sta, WLAN_STA_PS_STA))
201 ieee80211_sta_ps_deliver_wakeup(sta); 201 ieee80211_sta_ps_deliver_wakeup(sta);
202 else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) 202 else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) {
203 clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
203 ieee80211_sta_ps_deliver_poll_response(sta); 204 ieee80211_sta_ps_deliver_poll_response(sta);
205 } else
206 clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
204} 207}
205 208
206static int sta_prepare_rate_control(struct ieee80211_local *local, 209static int sta_prepare_rate_control(struct ieee80211_local *local,
@@ -880,6 +883,13 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
880} 883}
881EXPORT_SYMBOL(ieee80211_find_sta); 884EXPORT_SYMBOL(ieee80211_find_sta);
882 885
886static void clear_sta_ps_flags(void *_sta)
887{
888 struct sta_info *sta = _sta;
889
890 clear_sta_flags(sta, WLAN_STA_PS_DRIVER | WLAN_STA_PS_STA);
891}
892
883/* powersave support code */ 893/* powersave support code */
884void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) 894void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
885{ 895{
@@ -894,7 +904,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
894 904
895 /* Send all buffered frames to the station */ 905 /* Send all buffered frames to the station */
896 sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered); 906 sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
897 buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf); 907 buffered = ieee80211_add_pending_skbs_fn(local, &sta->ps_tx_buf,
908 clear_sta_ps_flags, sta);
898 sent += buffered; 909 sent += buffered;
899 local->total_ps_buffered -= buffered; 910 local->total_ps_buffered -= buffered;
900 911
@@ -973,7 +984,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
973 984
974 if (block) 985 if (block)
975 set_sta_flags(sta, WLAN_STA_PS_DRIVER); 986 set_sta_flags(sta, WLAN_STA_PS_DRIVER);
976 else 987 else if (test_sta_flags(sta, WLAN_STA_PS_DRIVER))
977 ieee80211_queue_work(hw, &sta->drv_unblock_wk); 988 ieee80211_queue_work(hw, &sta->drv_unblock_wk);
978} 989}
979EXPORT_SYMBOL(ieee80211_sta_block_awake); 990EXPORT_SYMBOL(ieee80211_sta_block_awake);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 9265acadef3..b562d9b6a70 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -248,6 +248,7 @@ enum plink_state {
248 * @sta: station information we share with the driver 248 * @sta: station information we share with the driver
249 * @dead: set to true when sta is unlinked 249 * @dead: set to true when sta is unlinked
250 * @uploaded: set to true when sta is uploaded to the driver 250 * @uploaded: set to true when sta is uploaded to the driver
251 * @lost_packets: number of consecutive lost packets
251 */ 252 */
252struct sta_info { 253struct sta_info {
253 /* General information, mostly static */ 254 /* General information, mostly static */
@@ -335,6 +336,8 @@ struct sta_info {
335 } debugfs; 336 } debugfs;
336#endif 337#endif
337 338
339 unsigned int lost_packets;
340
338 /* keep last! */ 341 /* keep last! */
339 struct ieee80211_sta sta; 342 struct ieee80211_sta sta;
340}; 343};
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 3153c19893b..bed7e32ed90 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -155,8 +155,21 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
155 155
156 ieee80211_queue_work(&local->hw, &local->recalc_smps); 156 ieee80211_queue_work(&local->hw, &local->recalc_smps);
157 } 157 }
158
159 if ((sdata->vif.type == NL80211_IFTYPE_STATION) &&
160 (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
161 ieee80211_sta_tx_notify(sdata, (void *) skb->data);
158} 162}
159 163
164/*
165 * Use a static threshold for now, best value to be determined
166 * by testing ...
167 * Should it depend on:
 168 * - # of retransmissions
169 * - current throughput (higher value for higher tpt)?
170 */
171#define STA_LOST_PKT_THRESHOLD 50
172
160void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) 173void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
161{ 174{
162 struct sk_buff *skb2; 175 struct sk_buff *skb2;
@@ -243,6 +256,19 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
243 if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && 256 if (!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
244 (info->flags & IEEE80211_TX_STAT_ACK)) 257 (info->flags & IEEE80211_TX_STAT_ACK))
245 ieee80211_frame_acked(sta, skb); 258 ieee80211_frame_acked(sta, skb);
259
260 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
261 if (info->flags & IEEE80211_TX_STAT_ACK) {
262 if (sta->lost_packets)
263 sta->lost_packets = 0;
264 } else if (++sta->lost_packets >= STA_LOST_PKT_THRESHOLD) {
265 cfg80211_cqm_pktloss_notify(sta->sdata->dev,
266 sta->sta.addr,
267 sta->lost_packets,
268 GFP_ATOMIC);
269 sta->lost_packets = 0;
270 }
271 }
246 } 272 }
247 273
248 rcu_read_unlock(); 274 rcu_read_unlock();
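The counter tracks consecutive failures only: any ACKed frame resets it, and every 50 unacknowledged frames in a row produce one cfg80211_cqm_pktloss_notify() event before the count starts over. A small standalone model of that behaviour:

    #include <stdio.h>

    #define STA_LOST_PKT_THRESHOLD 50

    static unsigned int lost_packets;

    static void tx_status(int acked)
    {
            if (acked) {
                    lost_packets = 0;
            } else if (++lost_packets >= STA_LOST_PKT_THRESHOLD) {
                    printf("cqm: %u consecutive frames lost\n", lost_packets);
                    lost_packets = 0;
            }
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 120; i++)
                    tx_status(0);   /* 120 failures -> two notifications */
            tx_status(1);           /* an ACK resets the remaining count */
            return 0;
    }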
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index df6aac52353..2ba74265682 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -622,7 +622,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
622 txrc.max_rate_idx = -1; 622 txrc.max_rate_idx = -1;
623 else 623 else
624 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; 624 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
625 txrc.ap = tx->sdata->vif.type == NL80211_IFTYPE_AP; 625 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
626 tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
626 627
627 /* set up RTS protection if desired */ 628 /* set up RTS protection if desired */
628 if (len > tx->local->hw.wiphy->rts_threshold) { 629 if (len > tx->local->hw.wiphy->rts_threshold) {
@@ -1033,6 +1034,7 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
1033 struct ieee80211_radiotap_header *rthdr = 1034 struct ieee80211_radiotap_header *rthdr =
1034 (struct ieee80211_radiotap_header *) skb->data; 1035 (struct ieee80211_radiotap_header *) skb->data;
1035 struct ieee80211_supported_band *sband; 1036 struct ieee80211_supported_band *sband;
1037 bool hw_frag;
1036 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1038 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1037 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len, 1039 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1038 NULL); 1040 NULL);
@@ -1042,6 +1044,9 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
1042 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1044 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1043 tx->flags &= ~IEEE80211_TX_FRAGMENTED; 1045 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1044 1046
1047 /* packet is fragmented in HW if we have a non-NULL driver callback */
1048 hw_frag = (tx->local->ops->set_frag_threshold != NULL);
1049
1045 /* 1050 /*
1046 * for every radiotap entry that is present 1051 * for every radiotap entry that is present
1047 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more 1052 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
@@ -1078,7 +1083,8 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
1078 } 1083 }
1079 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) 1084 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
1080 info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT; 1085 info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
1081 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) 1086 if ((*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) &&
1087 !hw_frag)
1082 tx->flags |= IEEE80211_TX_FRAGMENTED; 1088 tx->flags |= IEEE80211_TX_FRAGMENTED;
1083 break; 1089 break;
1084 1090
@@ -1181,8 +1187,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1181 /* 1187 /*
1182 * Set this flag (used below to indicate "automatic fragmentation"), 1188 * Set this flag (used below to indicate "automatic fragmentation"),
1183 * it will be cleared/left by radiotap as desired. 1189 * it will be cleared/left by radiotap as desired.
1190 * Only valid when fragmentation is done by the stack.
1184 */ 1191 */
1185 tx->flags |= IEEE80211_TX_FRAGMENTED; 1192 if (!local->ops->set_frag_threshold)
1193 tx->flags |= IEEE80211_TX_FRAGMENTED;
1186 1194
1187 /* process and remove the injection radiotap header */ 1195 /* process and remove the injection radiotap header */
1188 if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) { 1196 if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
@@ -2306,7 +2314,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2306 txrc.max_rate_idx = -1; 2314 txrc.max_rate_idx = -1;
2307 else 2315 else
2308 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; 2316 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
2309 txrc.ap = true; 2317 txrc.bss = true;
2310 rate_control_get_rate(sdata, NULL, &txrc); 2318 rate_control_get_rate(sdata, NULL, &txrc);
2311 2319
2312 info->control.vif = vif; 2320 info->control.vif = vif;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0b6fc92bc0d..e497476174c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -368,8 +368,9 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
368 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 368 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
369} 369}
370 370
371int ieee80211_add_pending_skbs(struct ieee80211_local *local, 371int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
372 struct sk_buff_head *skbs) 372 struct sk_buff_head *skbs,
373 void (*fn)(void *data), void *data)
373{ 374{
374 struct ieee80211_hw *hw = &local->hw; 375 struct ieee80211_hw *hw = &local->hw;
375 struct sk_buff *skb; 376 struct sk_buff *skb;
@@ -394,6 +395,9 @@ int ieee80211_add_pending_skbs(struct ieee80211_local *local,
394 __skb_queue_tail(&local->pending[queue], skb); 395 __skb_queue_tail(&local->pending[queue], skb);
395 } 396 }
396 397
398 if (fn)
399 fn(data);
400
397 for (i = 0; i < hw->queues; i++) 401 for (i = 0; i < hw->queues; i++)
398 __ieee80211_wake_queue(hw, i, 402 __ieee80211_wake_queue(hw, i,
399 IEEE80211_QUEUE_STOP_REASON_SKB_ADD); 403 IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
@@ -402,6 +406,12 @@ int ieee80211_add_pending_skbs(struct ieee80211_local *local,
402 return ret; 406 return ret;
403} 407}
404 408
409int ieee80211_add_pending_skbs(struct ieee80211_local *local,
410 struct sk_buff_head *skbs)
411{
412 return ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
413}
414
405void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, 415void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
406 enum queue_stop_reason reason) 416 enum queue_stop_reason reason)
407{ 417{
@@ -1011,9 +1021,10 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1011 return pos - buffer; 1021 return pos - buffer;
1012} 1022}
1013 1023
1014void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1024struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1015 const u8 *ssid, size_t ssid_len, 1025 u8 *dst,
1016 const u8 *ie, size_t ie_len) 1026 const u8 *ssid, size_t ssid_len,
1027 const u8 *ie, size_t ie_len)
1017{ 1028{
1018 struct ieee80211_local *local = sdata->local; 1029 struct ieee80211_local *local = sdata->local;
1019 struct sk_buff *skb; 1030 struct sk_buff *skb;
@@ -1027,7 +1038,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1027 if (!buf) { 1038 if (!buf) {
1028 printk(KERN_DEBUG "%s: failed to allocate temporary IE " 1039 printk(KERN_DEBUG "%s: failed to allocate temporary IE "
1029 "buffer\n", sdata->name); 1040 "buffer\n", sdata->name);
1030 return; 1041 return NULL;
1031 } 1042 }
1032 1043
1033 chan = ieee80211_frequency_to_channel( 1044 chan = ieee80211_frequency_to_channel(
@@ -1050,8 +1061,20 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1050 } 1061 }
1051 1062
1052 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1063 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1053 ieee80211_tx_skb(sdata, skb);
1054 kfree(buf); 1064 kfree(buf);
1065
1066 return skb;
1067}
1068
1069void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1070 const u8 *ssid, size_t ssid_len,
1071 const u8 *ie, size_t ie_len)
1072{
1073 struct sk_buff *skb;
1074
1075 skb = ieee80211_build_probe_req(sdata, dst, ssid, ssid_len, ie, ie_len);
1076 if (skb)
1077 ieee80211_tx_skb(sdata, skb);
1055} 1078}
1056 1079
1057u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1080u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1152,6 +1175,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1152 } 1175 }
1153 mutex_unlock(&local->sta_mtx); 1176 mutex_unlock(&local->sta_mtx);
1154 1177
1178 /* setup fragmentation threshold */
1179 drv_set_frag_threshold(local, hw->wiphy->frag_threshold);
1180
1155 /* setup RTS threshold */ 1181 /* setup RTS threshold */
1156 drv_set_rts_threshold(local, hw->wiphy->rts_threshold); 1182 drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
1157 1183
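The util.c hunks above split frame construction from transmission: ieee80211_send_probe_req() now calls a new ieee80211_build_probe_req() that returns the skb (or NULL on allocation failure), and ieee80211_add_pending_skbs() becomes a thin wrapper around ieee80211_add_pending_skbs_fn(), which can run a callback before the queues are woken. A small sketch of the build/send split as a general pattern, with hypothetical names standing in for the mac80211 API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical frame object standing in for struct sk_buff. */
struct frame { char payload[64]; };

/* "build" allocates and fills the frame; it may fail and return NULL. */
static struct frame *build_probe(const char *ssid)
{
    struct frame *f = malloc(sizeof(*f));
    if (!f)
        return NULL;
    snprintf(f->payload, sizeof(f->payload), "probe-req ssid=%s", ssid);
    return f;
}

/* "send" stays a thin wrapper: build, check, transmit (here: print).
 * Callers that only need the frame can use build_probe() directly. */
static void send_probe(const char *ssid)
{
    struct frame *f = build_probe(ssid);
    if (!f)
        return;               /* allocation failure handled in one place */
    puts(f->payload);
    free(f);
}

int main(void) { send_probe("example"); return 0; }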
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 34e6d02da77..58e75bbc1f9 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -21,7 +21,16 @@
21/* Default mapping in classifier to work with default 21/* Default mapping in classifier to work with default
22 * queue setup. 22 * queue setup.
23 */ 23 */
24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; 24const int ieee802_1d_to_ac[8] = {
25 IEEE80211_AC_BE,
26 IEEE80211_AC_BK,
27 IEEE80211_AC_BK,
28 IEEE80211_AC_BE,
29 IEEE80211_AC_VI,
30 IEEE80211_AC_VI,
31 IEEE80211_AC_VO,
32 IEEE80211_AC_VO
33};
25 34
26static int wme_downgrade_ac(struct sk_buff *skb) 35static int wme_downgrade_ac(struct sk_buff *skb)
27{ 36{
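The wme.c hunk keeps the 802.1D user-priority to access-category mapping unchanged; it only swaps the bare numbers for the symbolic IEEE80211_AC_* names. The check below assumes the usual mac80211 queue values (AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3) and verifies the symbolic table matches the old literal one:

#include <assert.h>
#include <stdio.h>

/* Assumed access-category values (AC_VO = highest-priority queue 0). */
enum { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };

/* 802.1D user priority (0..7) -> WMM access category. */
static const int up_to_ac[8] = {
    AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
};

int main(void)
{
    /* Same numbers as the old literal table { 2, 3, 3, 2, 1, 1, 0, 0 }. */
    static const int old_table[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
    int up;

    for (up = 0; up < 8; up++)
        assert(up_to_ac[up] == old_table[up]);
    puts("symbolic table matches the old numeric one");
    return 0;
}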
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 85dabb86be6..32fcbe290c0 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -173,9 +173,11 @@ next_hook:
173 outdev, &elem, okfn, hook_thresh); 173 outdev, &elem, okfn, hook_thresh);
174 if (verdict == NF_ACCEPT || verdict == NF_STOP) { 174 if (verdict == NF_ACCEPT || verdict == NF_STOP) {
175 ret = 1; 175 ret = 1;
176 } else if (verdict == NF_DROP) { 176 } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
177 kfree_skb(skb); 177 kfree_skb(skb);
178 ret = -EPERM; 178 ret = -(verdict >> NF_VERDICT_BITS);
179 if (ret == 0)
180 ret = -EPERM;
179 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { 181 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
180 if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn, 182 if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
181 verdict >> NF_VERDICT_BITS)) 183 verdict >> NF_VERDICT_BITS))
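The nf_hook_slow change above lets an NF_DROP verdict carry an errno in its upper bits: verdict >> NF_VERDICT_BITS yields the (positive) error code, and a plain NF_DROP with no embedded error still maps to -EPERM. The sketch below mirrors that logic under the assumption that the low 16 bits hold the verdict (NF_VERDICT_MASK 0xffff, NF_VERDICT_BITS 16, as at the time of this diff); later kernels changed those constants, so treat the exact widths as an assumption:

#include <errno.h>
#include <stdio.h>

#define NF_DROP          0
#define NF_VERDICT_BITS  16

/* Build a DROP verdict that also reports a specific (positive) errno. */
static unsigned int drop_with_err(int err)
{
    return NF_DROP | ((unsigned int)err << NF_VERDICT_BITS);
}

/* Mirror of the hook-slow logic: extract the errno, default to -EPERM. */
static int verdict_to_ret(unsigned int verdict)
{
    int ret = -(int)(verdict >> NF_VERDICT_BITS);
    if (ret == 0)
        ret = -EPERM;
    return ret;
}

int main(void)
{
    printf("plain drop          -> %d\n", verdict_to_ret(NF_DROP));
    printf("drop(EHOSTUNREACH)  -> %d\n",
           verdict_to_ret(drop_with_err(EHOSTUNREACH)));
    return 0;
}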
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5f5daa30b0a..c6f29363922 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -110,10 +110,8 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
110 struct rt6_info *rt; 110 struct rt6_info *rt;
111 struct flowi fl = { 111 struct flowi fl = {
112 .oif = 0, 112 .oif = 0,
113 .nl_u = { 113 .fl6_dst = *addr,
114 .ip6_u = { 114 .fl6_src = { .s6_addr32 = {0, 0, 0, 0} },
115 .daddr = *addr,
116 .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
117 }; 115 };
118 116
119 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); 117 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index de04ea39cde..5325a3fbe4a 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -96,12 +96,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
96 if (!(rt = (struct rtable *) 96 if (!(rt = (struct rtable *)
97 __ip_vs_dst_check(dest, rtos))) { 97 __ip_vs_dst_check(dest, rtos))) {
98 struct flowi fl = { 98 struct flowi fl = {
99 .oif = 0, 99 .fl4_dst = dest->addr.ip,
100 .nl_u = { 100 .fl4_tos = rtos,
101 .ip4_u = {
102 .daddr = dest->addr.ip,
103 .saddr = 0,
104 .tos = rtos, } },
105 }; 101 };
106 102
107 if (ip_route_output_key(net, &rt, &fl)) { 103 if (ip_route_output_key(net, &rt, &fl)) {
@@ -118,12 +114,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
118 spin_unlock(&dest->dst_lock); 114 spin_unlock(&dest->dst_lock);
119 } else { 115 } else {
120 struct flowi fl = { 116 struct flowi fl = {
121 .oif = 0, 117 .fl4_dst = daddr,
122 .nl_u = { 118 .fl4_tos = rtos,
123 .ip4_u = {
124 .daddr = daddr,
125 .saddr = 0,
126 .tos = rtos, } },
127 }; 119 };
128 120
129 if (ip_route_output_key(net, &rt, &fl)) { 121 if (ip_route_output_key(net, &rt, &fl)) {
@@ -169,7 +161,7 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
169 struct net *net = dev_net(dev); 161 struct net *net = dev_net(dev);
170 struct iphdr *iph = ip_hdr(skb); 162 struct iphdr *iph = ip_hdr(skb);
171 163
172 if (rt->fl.iif) { 164 if (rt_is_input_route(rt)) {
173 unsigned long orefdst = skb->_skb_refdst; 165 unsigned long orefdst = skb->_skb_refdst;
174 166
175 if (ip_route_input(skb, iph->daddr, iph->saddr, 167 if (ip_route_input(skb, iph->daddr, iph->saddr,
@@ -178,14 +170,9 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
178 refdst_drop(orefdst); 170 refdst_drop(orefdst);
179 } else { 171 } else {
180 struct flowi fl = { 172 struct flowi fl = {
181 .oif = 0, 173 .fl4_dst = iph->daddr,
182 .nl_u = { 174 .fl4_src = iph->saddr,
183 .ip4_u = { 175 .fl4_tos = RT_TOS(iph->tos),
184 .daddr = iph->daddr,
185 .saddr = iph->saddr,
186 .tos = RT_TOS(iph->tos),
187 }
188 },
189 .mark = skb->mark, 176 .mark = skb->mark,
190 }; 177 };
191 struct rtable *rt; 178 struct rtable *rt;
@@ -216,12 +203,7 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
216{ 203{
217 struct dst_entry *dst; 204 struct dst_entry *dst;
218 struct flowi fl = { 205 struct flowi fl = {
219 .oif = 0, 206 .fl6_dst = *daddr,
220 .nl_u = {
221 .ip6_u = {
222 .daddr = *daddr,
223 },
224 },
225 }; 207 };
226 208
227 dst = ip6_route_output(net, NULL, &fl); 209 dst = ip6_route_output(net, NULL, &fl);
@@ -552,7 +534,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
552#endif 534#endif
553 535
554 /* From world but DNAT to loopback address? */ 536 /* From world but DNAT to loopback address? */
555 if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) { 537 if (local && ipv4_is_loopback(rt->rt_dst) &&
538 rt_is_input_route(skb_rtable(skb))) {
556 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): " 539 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
557 "stopping DNAT to loopback address"); 540 "stopping DNAT to loopback address");
558 goto tx_error_put; 541 goto tx_error_put;
@@ -1165,7 +1148,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1165#endif 1148#endif
1166 1149
1167 /* From world but DNAT to loopback address? */ 1150 /* From world but DNAT to loopback address? */
1168 if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) { 1151 if (local && ipv4_is_loopback(rt->rt_dst) &&
1152 rt_is_input_route(skb_rtable(skb))) {
1169 IP_VS_DBG(1, "%s(): " 1153 IP_VS_DBG(1, "%s(): "
1170 "stopping DNAT to loopback %pI4\n", 1154 "stopping DNAT to loopback %pI4\n",
1171 __func__, &cp->daddr.ip); 1155 __func__, &cp->daddr.ip);
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 22a2d421e7e..5128a6c4cb2 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -70,9 +70,9 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
70 return false; 70 return false;
71 fl.oif = info->priv->oif; 71 fl.oif = info->priv->oif;
72 } 72 }
73 fl.nl_u.ip4_u.daddr = info->gw.ip; 73 fl.fl4_dst = info->gw.ip;
74 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); 74 fl.fl4_tos = RT_TOS(iph->tos);
75 fl.nl_u.ip4_u.scope = RT_SCOPE_UNIVERSE; 75 fl.fl4_scope = RT_SCOPE_UNIVERSE;
76 if (ip_route_output_key(net, &rt, &fl) != 0) 76 if (ip_route_output_key(net, &rt, &fl) != 0)
77 return false; 77 return false;
78 78
@@ -150,9 +150,9 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
150 return false; 150 return false;
151 fl.oif = info->priv->oif; 151 fl.oif = info->priv->oif;
152 } 152 }
153 fl.nl_u.ip6_u.daddr = info->gw.in6; 153 fl.fl6_dst = info->gw.in6;
154 fl.nl_u.ip6_u.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) | 154 fl.fl6_flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
155 (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]; 155 (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
156 dst = ip6_route_output(net, NULL, &fl); 156 dst = ip6_route_output(net, NULL, &fl);
157 if (dst == NULL) 157 if (dst == NULL)
158 return false; 158 return false;
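The ipvs and xt_TEE hunks above (and the rxrpc one further down) stop spelling out the nested nl_u.ip4_u / nl_u.ip6_u unions and use the flattened fl4_dst, fl4_tos, fl6_dst, ... field names instead; the designated initializers get shorter and every omitted member still defaults to zero. A standalone illustration of the same idea with a hypothetical flattened struct (not the kernel's struct flowi):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical flattened flow key. */
struct flow_key {
    int      oif;
    uint32_t fl4_dst;
    uint32_t fl4_src;
    uint8_t  fl4_tos;
};

int main(void)
{
    /* Equivalent of the old nested form, but every omitted member
     * (oif, fl4_src) is implicitly zero thanks to the initializer. */
    struct flow_key fl = {
        .fl4_dst = 0x0a000001,      /* 10.0.0.1 */
        .fl4_tos = 0x10,
    };

    printf("oif=%d dst=%#x src=%#x tos=%#x\n",
           fl.oif, fl.fl4_dst, fl.fl4_src, (unsigned int)fl.fl4_tos);
    return 0;
}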
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8298e676f5a..246a04a1323 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -61,6 +61,7 @@
61#include <linux/kernel.h> 61#include <linux/kernel.h>
62#include <linux/kmod.h> 62#include <linux/kmod.h>
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <linux/vmalloc.h>
64#include <net/net_namespace.h> 65#include <net/net_namespace.h>
65#include <net/ip.h> 66#include <net/ip.h>
66#include <net/protocol.h> 67#include <net/protocol.h>
@@ -163,8 +164,13 @@ struct packet_mreq_max {
163static int packet_set_ring(struct sock *sk, struct tpacket_req *req, 164static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
164 int closing, int tx_ring); 165 int closing, int tx_ring);
165 166
167#define PGV_FROM_VMALLOC 1
168struct pgv {
169 char *buffer;
170};
171
166struct packet_ring_buffer { 172struct packet_ring_buffer {
167 char **pg_vec; 173 struct pgv *pg_vec;
168 unsigned int head; 174 unsigned int head;
169 unsigned int frames_per_block; 175 unsigned int frames_per_block;
170 unsigned int frame_size; 176 unsigned int frame_size;
@@ -217,6 +223,13 @@ struct packet_skb_cb {
217 223
218#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 224#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
219 225
226static inline __pure struct page *pgv_to_page(void *addr)
227{
228 if (is_vmalloc_addr(addr))
229 return vmalloc_to_page(addr);
230 return virt_to_page(addr);
231}
232
220static void __packet_set_status(struct packet_sock *po, void *frame, int status) 233static void __packet_set_status(struct packet_sock *po, void *frame, int status)
221{ 234{
222 union { 235 union {
@@ -229,11 +242,11 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
229 switch (po->tp_version) { 242 switch (po->tp_version) {
230 case TPACKET_V1: 243 case TPACKET_V1:
231 h.h1->tp_status = status; 244 h.h1->tp_status = status;
232 flush_dcache_page(virt_to_page(&h.h1->tp_status)); 245 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
233 break; 246 break;
234 case TPACKET_V2: 247 case TPACKET_V2:
235 h.h2->tp_status = status; 248 h.h2->tp_status = status;
236 flush_dcache_page(virt_to_page(&h.h2->tp_status)); 249 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
237 break; 250 break;
238 default: 251 default:
239 pr_err("TPACKET version not supported\n"); 252 pr_err("TPACKET version not supported\n");
@@ -256,10 +269,10 @@ static int __packet_get_status(struct packet_sock *po, void *frame)
256 h.raw = frame; 269 h.raw = frame;
257 switch (po->tp_version) { 270 switch (po->tp_version) {
258 case TPACKET_V1: 271 case TPACKET_V1:
259 flush_dcache_page(virt_to_page(&h.h1->tp_status)); 272 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
260 return h.h1->tp_status; 273 return h.h1->tp_status;
261 case TPACKET_V2: 274 case TPACKET_V2:
262 flush_dcache_page(virt_to_page(&h.h2->tp_status)); 275 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
263 return h.h2->tp_status; 276 return h.h2->tp_status;
264 default: 277 default:
265 pr_err("TPACKET version not supported\n"); 278 pr_err("TPACKET version not supported\n");
@@ -283,7 +296,8 @@ static void *packet_lookup_frame(struct packet_sock *po,
283 pg_vec_pos = position / rb->frames_per_block; 296 pg_vec_pos = position / rb->frames_per_block;
284 frame_offset = position % rb->frames_per_block; 297 frame_offset = position % rb->frames_per_block;
285 298
286 h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size); 299 h.raw = rb->pg_vec[pg_vec_pos].buffer +
300 (frame_offset * rb->frame_size);
287 301
288 if (status != __packet_get_status(po, h.raw)) 302 if (status != __packet_get_status(po, h.raw))
289 return NULL; 303 return NULL;
@@ -503,7 +517,8 @@ out_free:
503 return err; 517 return err;
504} 518}
505 519
506static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk, 520static inline unsigned int run_filter(const struct sk_buff *skb,
521 const struct sock *sk,
507 unsigned int res) 522 unsigned int res)
508{ 523{
509 struct sk_filter *filter; 524 struct sk_filter *filter;
@@ -511,22 +526,22 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
511 rcu_read_lock_bh(); 526 rcu_read_lock_bh();
512 filter = rcu_dereference_bh(sk->sk_filter); 527 filter = rcu_dereference_bh(sk->sk_filter);
513 if (filter != NULL) 528 if (filter != NULL)
514 res = sk_run_filter(skb, filter->insns, filter->len); 529 res = sk_run_filter(skb, filter->insns);
515 rcu_read_unlock_bh(); 530 rcu_read_unlock_bh();
516 531
517 return res; 532 return res;
518} 533}
519 534
520/* 535/*
521 This function makes lazy skb cloning in hope that most of packets 536 * This function makes lazy skb cloning in hope that most of packets
522 are discarded by BPF. 537 * are discarded by BPF.
523 538 *
524 Note tricky part: we DO mangle shared skb! skb->data, skb->len 539 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
525 and skb->cb are mangled. It works because (and until) packets 540 * and skb->cb are mangled. It works because (and until) packets
526 falling here are owned by current CPU. Output packets are cloned 541 * falling here are owned by current CPU. Output packets are cloned
527 by dev_queue_xmit_nit(), input packets are processed by net_bh 542 * by dev_queue_xmit_nit(), input packets are processed by net_bh
528 sequencially, so that if we return skb to original state on exit, 543 * sequencially, so that if we return skb to original state on exit,
529 we will not harm anyone. 544 * we will not harm anyone.
530 */ 545 */
531 546
532static int packet_rcv(struct sk_buff *skb, struct net_device *dev, 547static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -552,11 +567,11 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
552 567
553 if (dev->header_ops) { 568 if (dev->header_ops) {
554 /* The device has an explicit notion of ll header, 569 /* The device has an explicit notion of ll header,
555 exported to higher levels. 570 * exported to higher levels.
556 571 *
557 Otherwise, the device hides datails of it frame 572 * Otherwise, the device hides details of its frame
558 structure, so that corresponding packet head 573 * structure, so that corresponding packet head is
559 never delivered to user. 574 * never delivered to user.
560 */ 575 */
561 if (sk->sk_type != SOCK_DGRAM) 576 if (sk->sk_type != SOCK_DGRAM)
562 skb_push(skb, skb->data - skb_mac_header(skb)); 577 skb_push(skb, skb->data - skb_mac_header(skb));
@@ -791,17 +806,15 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
791 806
792 __packet_set_status(po, h.raw, status); 807 __packet_set_status(po, h.raw, status);
793 smp_mb(); 808 smp_mb();
809#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
794 { 810 {
795 struct page *p_start, *p_end; 811 u8 *start, *end;
796 u8 *h_end = h.raw + macoff + snaplen - 1; 812
797 813 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
798 p_start = virt_to_page(h.raw); 814 for (start = h.raw; start < end; start += PAGE_SIZE)
799 p_end = virt_to_page(h_end); 815 flush_dcache_page(pgv_to_page(start));
800 while (p_start <= p_end) {
801 flush_dcache_page(p_start);
802 p_start++;
803 }
804 } 816 }
817#endif
805 818
806 sk->sk_data_ready(sk, 0); 819 sk->sk_data_ready(sk, 0);
807 820
@@ -907,7 +920,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
907 } 920 }
908 921
909 err = -EFAULT; 922 err = -EFAULT;
910 page = virt_to_page(data);
911 offset = offset_in_page(data); 923 offset = offset_in_page(data);
912 len_max = PAGE_SIZE - offset; 924 len_max = PAGE_SIZE - offset;
913 len = ((to_write > len_max) ? len_max : to_write); 925 len = ((to_write > len_max) ? len_max : to_write);
@@ -926,11 +938,11 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
926 return -EFAULT; 938 return -EFAULT;
927 } 939 }
928 940
941 page = pgv_to_page(data);
942 data += len;
929 flush_dcache_page(page); 943 flush_dcache_page(page);
930 get_page(page); 944 get_page(page);
931 skb_fill_page_desc(skb, 945 skb_fill_page_desc(skb, nr_frags, page, offset, len);
932 nr_frags,
933 page++, offset, len);
934 to_write -= len; 946 to_write -= len;
935 offset = 0; 947 offset = 0;
936 len_max = PAGE_SIZE; 948 len_max = PAGE_SIZE;
@@ -2325,37 +2337,70 @@ static const struct vm_operations_struct packet_mmap_ops = {
2325 .close = packet_mm_close, 2337 .close = packet_mm_close,
2326}; 2338};
2327 2339
2328static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len) 2340static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
2341 unsigned int len)
2329{ 2342{
2330 int i; 2343 int i;
2331 2344
2332 for (i = 0; i < len; i++) { 2345 for (i = 0; i < len; i++) {
2333 if (likely(pg_vec[i])) 2346 if (likely(pg_vec[i].buffer)) {
2334 free_pages((unsigned long) pg_vec[i], order); 2347 if (is_vmalloc_addr(pg_vec[i].buffer))
2348 vfree(pg_vec[i].buffer);
2349 else
2350 free_pages((unsigned long)pg_vec[i].buffer,
2351 order);
2352 pg_vec[i].buffer = NULL;
2353 }
2335 } 2354 }
2336 kfree(pg_vec); 2355 kfree(pg_vec);
2337} 2356}
2338 2357
2339static inline char *alloc_one_pg_vec_page(unsigned long order) 2358static inline char *alloc_one_pg_vec_page(unsigned long order)
2340{ 2359{
2341 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN; 2360 char *buffer = NULL;
2361 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
2362 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
2342 2363
2343 return (char *) __get_free_pages(gfp_flags, order); 2364 buffer = (char *) __get_free_pages(gfp_flags, order);
2365
2366 if (buffer)
2367 return buffer;
2368
2369 /*
2370 * __get_free_pages failed, fall back to vmalloc
2371 */
2372 buffer = vzalloc((1 << order) * PAGE_SIZE);
2373
2374 if (buffer)
2375 return buffer;
2376
2377 /*
2378 * vmalloc failed, lets dig into swap here
2379 */
2380 gfp_flags &= ~__GFP_NORETRY;
2381 buffer = (char *)__get_free_pages(gfp_flags, order);
2382 if (buffer)
2383 return buffer;
2384
2385 /*
2386 * complete and utter failure
2387 */
2388 return NULL;
2344} 2389}
2345 2390
2346static char **alloc_pg_vec(struct tpacket_req *req, int order) 2391static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
2347{ 2392{
2348 unsigned int block_nr = req->tp_block_nr; 2393 unsigned int block_nr = req->tp_block_nr;
2349 char **pg_vec; 2394 struct pgv *pg_vec;
2350 int i; 2395 int i;
2351 2396
2352 pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL); 2397 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
2353 if (unlikely(!pg_vec)) 2398 if (unlikely(!pg_vec))
2354 goto out; 2399 goto out;
2355 2400
2356 for (i = 0; i < block_nr; i++) { 2401 for (i = 0; i < block_nr; i++) {
2357 pg_vec[i] = alloc_one_pg_vec_page(order); 2402 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
2358 if (unlikely(!pg_vec[i])) 2403 if (unlikely(!pg_vec[i].buffer))
2359 goto out_free_pgvec; 2404 goto out_free_pgvec;
2360 } 2405 }
2361 2406
@@ -2371,7 +2416,7 @@ out_free_pgvec:
2371static int packet_set_ring(struct sock *sk, struct tpacket_req *req, 2416static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2372 int closing, int tx_ring) 2417 int closing, int tx_ring)
2373{ 2418{
2374 char **pg_vec = NULL; 2419 struct pgv *pg_vec = NULL;
2375 struct packet_sock *po = pkt_sk(sk); 2420 struct packet_sock *po = pkt_sk(sk);
2376 int was_running, order = 0; 2421 int was_running, order = 0;
2377 struct packet_ring_buffer *rb; 2422 struct packet_ring_buffer *rb;
@@ -2533,15 +2578,17 @@ static int packet_mmap(struct file *file, struct socket *sock,
2533 continue; 2578 continue;
2534 2579
2535 for (i = 0; i < rb->pg_vec_len; i++) { 2580 for (i = 0; i < rb->pg_vec_len; i++) {
2536 struct page *page = virt_to_page(rb->pg_vec[i]); 2581 struct page *page;
2582 void *kaddr = rb->pg_vec[i].buffer;
2537 int pg_num; 2583 int pg_num;
2538 2584
2539 for (pg_num = 0; pg_num < rb->pg_vec_pages; 2585 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
2540 pg_num++, page++) { 2586 page = pgv_to_page(kaddr);
2541 err = vm_insert_page(vma, start, page); 2587 err = vm_insert_page(vma, start, page);
2542 if (unlikely(err)) 2588 if (unlikely(err))
2543 goto out; 2589 goto out;
2544 start += PAGE_SIZE; 2590 start += PAGE_SIZE;
2591 kaddr += PAGE_SIZE;
2545 } 2592 }
2546 } 2593 }
2547 } 2594 }
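The af_packet.c changes above let ring-buffer blocks come from either the page allocator or vmalloc: each pg_vec entry is now a struct pgv, pgv_to_page() picks vmalloc_to_page() or virt_to_page() per address, and alloc_one_pg_vec_page() tries __get_free_pages() with __GFP_NORETRY first, then vzalloc(), then a final page allocation that may dip into reclaim. The userspace-flavoured sketch below only illustrates that fallback ordering; the allocator calls are stand-ins, not the kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-ins for __get_free_pages() and vzalloc(). */
static void *try_contiguous(size_t bytes, int allow_retry)
{
    (void)allow_retry;                 /* kernel: toggles __GFP_NORETRY */
    return malloc(bytes);              /* pretend this may fail */
}

static void *try_vmalloc_like(size_t bytes)
{
    return calloc(1, bytes);           /* vzalloc() returns zeroed memory */
}

static void *alloc_one_block(size_t bytes)
{
    void *buf;

    buf = try_contiguous(bytes, 0);    /* cheap attempt, no aggressive reclaim */
    if (buf)
        return buf;

    buf = try_vmalloc_like(bytes);     /* fall back to non-contiguous memory */
    if (buf)
        return buf;

    return try_contiguous(bytes, 1);   /* last resort: retry harder */
}

int main(void)
{
    void *block = alloc_one_block(1 << 16);
    if (!block)
        return 1;
    memset(block, 0, 1 << 16);
    free(block);
    puts("allocated one ring block");
    return 0;
}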
diff --git a/net/phonet/Makefile b/net/phonet/Makefile
index d62bbba649b..e10b1b182ce 100644
--- a/net/phonet/Makefile
+++ b/net/phonet/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_PHONET) += phonet.o pn_pep.o 1obj-$(CONFIG_PHONET) += phonet.o pn_pep.o
2 2
3phonet-objs := \ 3phonet-y := \
4 pn_dev.o \ 4 pn_dev.o \
5 pn_netlink.o \ 5 pn_netlink.o \
6 socket.o \ 6 socket.o \
@@ -8,4 +8,4 @@ phonet-objs := \
8 sysctl.o \ 8 sysctl.o \
9 af_phonet.o 9 af_phonet.o
10 10
11pn_pep-objs := pep.o pep-gprs.o 11pn_pep-y := pep.o pep-gprs.o
diff --git a/net/rds/Makefile b/net/rds/Makefile
index b46eca10968..56d3f6023ce 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -4,7 +4,7 @@ rds-y := af_rds.o bind.o cong.o connection.o info.o message.o \
4 loop.o page.o rdma.o 4 loop.o page.o rdma.o
5 5
6obj-$(CONFIG_RDS_RDMA) += rds_rdma.o 6obj-$(CONFIG_RDS_RDMA) += rds_rdma.o
7rds_rdma-objs := rdma_transport.o \ 7rds_rdma-y := rdma_transport.o \
8 ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \ 8 ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \
9 ib_sysctl.o ib_rdma.o \ 9 ib_sysctl.o ib_rdma.o \
10 iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \ 10 iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \
@@ -12,10 +12,8 @@ rds_rdma-objs := rdma_transport.o \
12 12
13 13
14obj-$(CONFIG_RDS_TCP) += rds_tcp.o 14obj-$(CONFIG_RDS_TCP) += rds_tcp.o
15rds_tcp-objs := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \ 15rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
16 tcp_send.o tcp_stats.o 16 tcp_send.o tcp_stats.o
17 17
18ifeq ($(CONFIG_RDS_DEBUG), y) 18ccflags-$(CONFIG_RDS_DEBUG) := -DDEBUG
19EXTRA_CFLAGS += -DDEBUG
20endif
21 19
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 04f599089e6..0198191b756 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -149,20 +149,6 @@ static void rfkill_led_trigger_activate(struct led_classdev *led)
149 rfkill_led_trigger_event(rfkill); 149 rfkill_led_trigger_event(rfkill);
150} 150}
151 151
152const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
153{
154 return rfkill->led_trigger.name;
155}
156EXPORT_SYMBOL(rfkill_get_led_trigger_name);
157
158void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
159{
160 BUG_ON(!rfkill);
161
162 rfkill->ledtrigname = name;
163}
164EXPORT_SYMBOL(rfkill_set_led_trigger_name);
165
166static int rfkill_led_trigger_register(struct rfkill *rfkill) 152static int rfkill_led_trigger_register(struct rfkill *rfkill)
167{ 153{
168 rfkill->led_trigger.name = rfkill->ledtrigname 154 rfkill->led_trigger.name = rfkill->ledtrigname
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index c46867c61c9..d1c3429b69e 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -2,7 +2,7 @@
2# Makefile for Linux kernel RxRPC 2# Makefile for Linux kernel RxRPC
3# 3#
4 4
5af-rxrpc-objs := \ 5af-rxrpc-y := \
6 af_rxrpc.o \ 6 af_rxrpc.o \
7 ar-accept.o \ 7 ar-accept.o \
8 ar-ack.o \ 8 ar-ack.o \
@@ -21,7 +21,7 @@ af-rxrpc-objs := \
21 ar-transport.o 21 ar-transport.o
22 22
23ifeq ($(CONFIG_PROC_FS),y) 23ifeq ($(CONFIG_PROC_FS),y)
24af-rxrpc-objs += ar-proc.o 24af-rxrpc-y += ar-proc.o
25endif 25endif
26 26
27obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o 27obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 9f1729bd60d..a53fb25a64e 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -47,12 +47,12 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
47 case AF_INET: 47 case AF_INET:
48 fl.oif = 0; 48 fl.oif = 0;
49 fl.proto = IPPROTO_UDP, 49 fl.proto = IPPROTO_UDP,
50 fl.nl_u.ip4_u.saddr = 0; 50 fl.fl4_dst = peer->srx.transport.sin.sin_addr.s_addr;
51 fl.nl_u.ip4_u.daddr = peer->srx.transport.sin.sin_addr.s_addr; 51 fl.fl4_src = 0;
52 fl.nl_u.ip4_u.tos = 0; 52 fl.fl4_tos = 0;
53 /* assume AFS.CM talking to AFS.FS */ 53 /* assume AFS.CM talking to AFS.FS */
54 fl.uli_u.ports.sport = htons(7001); 54 fl.fl_ip_sport = htons(7001);
55 fl.uli_u.ports.dport = htons(7000); 55 fl.fl_ip_dport = htons(7000);
56 break; 56 break;
57 default: 57 default:
58 BUG(); 58 BUG();
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5dbb3cd96e5..0918834ee4a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
60 60
61 /* check the reason of requeuing without tx lock first */ 61 /* check the reason of requeuing without tx lock first */
62 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 62 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
63 if (!netif_tx_queue_stopped(txq) && 63 if (!netif_tx_queue_frozen_or_stopped(txq)) {
64 !netif_tx_queue_frozen(txq)) {
65 q->gso_skb = NULL; 64 q->gso_skb = NULL;
66 q->q.qlen--; 65 q->q.qlen--;
67 } else 66 } else
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
122 spin_unlock(root_lock); 121 spin_unlock(root_lock);
123 122
124 HARD_TX_LOCK(dev, txq, smp_processor_id()); 123 HARD_TX_LOCK(dev, txq, smp_processor_id());
125 if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) 124 if (!netif_tx_queue_frozen_or_stopped(txq))
126 ret = dev_hard_start_xmit(skb, dev, txq); 125 ret = dev_hard_start_xmit(skb, dev, txq);
127 126
128 HARD_TX_UNLOCK(dev, txq); 127 HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
144 ret = dev_requeue_skb(skb, q); 143 ret = dev_requeue_skb(skb, q);
145 } 144 }
146 145
147 if (ret && (netif_tx_queue_stopped(txq) || 146 if (ret && netif_tx_queue_frozen_or_stopped(txq))
148 netif_tx_queue_frozen(txq)))
149 ret = 0; 147 ret = 0;
150 148
151 return ret; 149 return ret;
@@ -555,7 +553,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
555 size = QDISC_ALIGN(sizeof(*sch)); 553 size = QDISC_ALIGN(sizeof(*sch));
556 size += ops->priv_size + (QDISC_ALIGNTO - 1); 554 size += ops->priv_size + (QDISC_ALIGNTO - 1);
557 555
558 p = kzalloc(size, GFP_KERNEL); 556 p = kzalloc_node(size, GFP_KERNEL,
557 netdev_queue_numa_node_read(dev_queue));
558
559 if (!p) 559 if (!p)
560 goto errout; 560 goto errout;
561 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 561 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 401af959670..106479a7c94 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -309,8 +309,7 @@ restart:
309 if (__netif_tx_trylock(slave_txq)) { 309 if (__netif_tx_trylock(slave_txq)) {
310 unsigned int length = qdisc_pkt_len(skb); 310 unsigned int length = qdisc_pkt_len(skb);
311 311
312 if (!netif_tx_queue_stopped(slave_txq) && 312 if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
313 !netif_tx_queue_frozen(slave_txq) &&
314 slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) { 313 slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
315 txq_trans_update(slave_txq); 314 txq_trans_update(slave_txq);
316 __netif_tx_unlock(slave_txq); 315 __netif_tx_unlock(slave_txq);
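The sch_generic.c and sch_teql.c hunks above collapse the repeated "stopped || frozen" test into a single netif_tx_queue_frozen_or_stopped() helper. Presumably (an assumption; the helper body is not part of this diff) it just tests both queue-state bits in one read. A standalone sketch of that consolidation with hypothetical flag names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical queue state bits, not the kernel's definitions. */
#define QUEUE_STOPPED 0x1u
#define QUEUE_FROZEN  0x2u

struct tx_queue { unsigned long state; };

/* One helper replaces two separate predicates at every call site. */
static bool queue_frozen_or_stopped(const struct tx_queue *q)
{
    return (q->state & (QUEUE_STOPPED | QUEUE_FROZEN)) != 0;
}

int main(void)
{
    struct tx_queue q = { .state = QUEUE_FROZEN };
    printf("can transmit: %s\n", queue_frozen_or_stopped(&q) ? "no" : "yes");
    return 0;
}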
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6bd554323a3..842c7f3650b 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6047,7 +6047,7 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
6047 * will suddenly eat the receive_queue. 6047 * will suddenly eat the receive_queue.
6048 * 6048 *
6049 * Look at current nfs client by the way... 6049 * Look at current nfs client by the way...
6050 * However, this function was corrent in any case. 8) 6050 * However, this function was correct in any case. 8)
6051 */ 6051 */
6052 if (flags & MSG_PEEK) { 6052 if (flags & MSG_PEEK) {
6053 spin_lock_bh(&sk->sk_receive_queue.lock); 6053 spin_lock_bh(&sk->sk_receive_queue.lock);
diff --git a/net/socket.c b/net/socket.c
index 3ca2fd9e372..c898df76e92 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -156,7 +156,7 @@ static const struct file_operations socket_file_ops = {
156 */ 156 */
157 157
158static DEFINE_SPINLOCK(net_family_lock); 158static DEFINE_SPINLOCK(net_family_lock);
159static const struct net_proto_family *net_families[NPROTO] __read_mostly; 159static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
160 160
161/* 161/*
162 * Statistics counters of the socket lists 162 * Statistics counters of the socket lists
@@ -1200,7 +1200,7 @@ int __sock_create(struct net *net, int family, int type, int protocol,
1200 * requested real, full-featured networking support upon configuration. 1200 * requested real, full-featured networking support upon configuration.
1201 * Otherwise module support will break! 1201 * Otherwise module support will break!
1202 */ 1202 */
1203 if (net_families[family] == NULL) 1203 if (rcu_access_pointer(net_families[family]) == NULL)
1204 request_module("net-pf-%d", family); 1204 request_module("net-pf-%d", family);
1205#endif 1205#endif
1206 1206
@@ -2332,10 +2332,11 @@ int sock_register(const struct net_proto_family *ops)
2332 } 2332 }
2333 2333
2334 spin_lock(&net_family_lock); 2334 spin_lock(&net_family_lock);
2335 if (net_families[ops->family]) 2335 if (rcu_dereference_protected(net_families[ops->family],
2336 lockdep_is_held(&net_family_lock)))
2336 err = -EEXIST; 2337 err = -EEXIST;
2337 else { 2338 else {
2338 net_families[ops->family] = ops; 2339 rcu_assign_pointer(net_families[ops->family], ops);
2339 err = 0; 2340 err = 0;
2340 } 2341 }
2341 spin_unlock(&net_family_lock); 2342 spin_unlock(&net_family_lock);
@@ -2363,7 +2364,7 @@ void sock_unregister(int family)
2363 BUG_ON(family < 0 || family >= NPROTO); 2364 BUG_ON(family < 0 || family >= NPROTO);
2364 2365
2365 spin_lock(&net_family_lock); 2366 spin_lock(&net_family_lock);
2366 net_families[family] = NULL; 2367 rcu_assign_pointer(net_families[family], NULL);
2367 spin_unlock(&net_family_lock); 2368 spin_unlock(&net_family_lock);
2368 2369
2369 synchronize_rcu(); 2370 synchronize_rcu();
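The socket.c hunks above make the protocol-family table RCU-protected: lookups use rcu_access_pointer(), registration checks the slot with rcu_dereference_protected() under net_family_lock and publishes with rcu_assign_pointer(), and unregistration stores NULL and waits with synchronize_rcu(). The sketch below shows the same publish/lookup shape using C11 acquire/release atomics as a rough userspace stand-in; it is an approximation of the pattern, not of kernel RCU semantics:

#include <stdatomic.h>
#include <stdio.h>

struct proto_family { int family; const char *name; };

#define NPROTO 4
/* Readers load with acquire, the writer publishes with release --
 * a loose analogue of rcu_dereference()/rcu_assign_pointer(). */
static _Atomic(struct proto_family *) families[NPROTO];

static int register_family(struct proto_family *ops)
{
    /* The kernel version does this check under a spinlock. */
    struct proto_family *cur =
        atomic_load_explicit(&families[ops->family], memory_order_acquire);
    if (cur)
        return -1;                      /* -EEXIST in the diff */
    atomic_store_explicit(&families[ops->family], ops, memory_order_release);
    return 0;
}

static const char *lookup_family(int family)
{
    struct proto_family *cur =
        atomic_load_explicit(&families[family], memory_order_acquire);
    return cur ? cur->name : "(unregistered)";
}

int main(void)
{
    static struct proto_family inet = { .family = 2, .name = "inet" };
    register_family(&inet);
    printf("family 2 -> %s\n", lookup_family(2));
    printf("family 3 -> %s\n", lookup_family(3));
    return 0;
}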
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index 7350d86a32e..9e4cb59ef9f 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -4,10 +4,10 @@
4 4
5obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o 5obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o
6 6
7auth_rpcgss-objs := auth_gss.o gss_generic_token.o \ 7auth_rpcgss-y := auth_gss.o gss_generic_token.o \
8 gss_mech_switch.o svcauth_gss.o 8 gss_mech_switch.o svcauth_gss.o
9 9
10obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o 10obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
11 11
12rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ 12rpcsec_gss_krb5-y := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
13 gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o 13 gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 8a2e89bffde..886715a7525 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -35,11 +35,9 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "addr.h" 38#include "addr.h"
40#include "zone.h" 39#include "zone.h"
41#include "cluster.h" 40#include "cluster.h"
42#include "net.h"
43 41
44/** 42/**
45 * tipc_addr_domain_valid - validates a network domain address 43 * tipc_addr_domain_valid - validates a network domain address
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 22a60fc9839..6d828d9eda4 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -36,17 +36,9 @@
36 */ 36 */
37 37
38#include "core.h" 38#include "core.h"
39#include "msg.h"
40#include "dbg.h"
41#include "link.h" 39#include "link.h"
42#include "net.h"
43#include "node.h"
44#include "port.h" 40#include "port.h"
45#include "addr.h"
46#include "node_subscr.h"
47#include "name_distr.h" 41#include "name_distr.h"
48#include "bearer.h"
49#include "name_table.h"
50#include "bcast.h" 42#include "bcast.h"
51 43
52#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */ 44#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 9927d1d56c4..885da94be4a 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -36,12 +36,9 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "config.h" 38#include "config.h"
39#include "dbg.h"
40#include "bearer.h" 39#include "bearer.h"
41#include "link.h"
42#include "port.h" 40#include "port.h"
43#include "discover.h" 41#include "discover.h"
44#include "bcast.h"
45 42
46#define MAX_ADDR_STR 32 43#define MAX_ADDR_STR 32
47 44
@@ -625,7 +622,7 @@ int tipc_block_bearer(const char *name)
625 * Note: This routine assumes caller holds tipc_net_lock. 622 * Note: This routine assumes caller holds tipc_net_lock.
626 */ 623 */
627 624
628static int bearer_disable(struct bearer *b_ptr) 625static void bearer_disable(struct bearer *b_ptr)
629{ 626{
630 struct link *l_ptr; 627 struct link *l_ptr;
631 struct link *temp_l_ptr; 628 struct link *temp_l_ptr;
@@ -641,7 +638,6 @@ static int bearer_disable(struct bearer *b_ptr)
641 } 638 }
642 spin_unlock_bh(&b_ptr->publ.lock); 639 spin_unlock_bh(&b_ptr->publ.lock);
643 memset(b_ptr, 0, sizeof(struct bearer)); 640 memset(b_ptr, 0, sizeof(struct bearer));
644 return 0;
645} 641}
646 642
647int tipc_disable_bearer(const char *name) 643int tipc_disable_bearer(const char *name)
@@ -654,8 +650,10 @@ int tipc_disable_bearer(const char *name)
654 if (b_ptr == NULL) { 650 if (b_ptr == NULL) {
655 warn("Attempt to disable unknown bearer <%s>\n", name); 651 warn("Attempt to disable unknown bearer <%s>\n", name);
656 res = -EINVAL; 652 res = -EINVAL;
657 } else 653 } else {
658 res = bearer_disable(b_ptr); 654 bearer_disable(b_ptr);
655 res = 0;
656 }
659 write_unlock_bh(&tipc_net_lock); 657 write_unlock_bh(&tipc_net_lock);
660 return res; 658 return res;
661} 659}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index a850b389663..85f451d5aac 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -37,12 +37,50 @@
37#ifndef _TIPC_BEARER_H 37#ifndef _TIPC_BEARER_H
38#define _TIPC_BEARER_H 38#define _TIPC_BEARER_H
39 39
40#include "core.h"
41#include "bcast.h" 40#include "bcast.h"
42 41
43#define MAX_BEARERS 8 42#define MAX_BEARERS 8
44#define MAX_MEDIA 4 43#define MAX_MEDIA 4
45 44
45/*
46 * Identifiers of supported TIPC media types
47 */
48#define TIPC_MEDIA_TYPE_ETH 1
49
50/*
51 * Destination address structure used by TIPC bearers when sending messages
52 *
53 * IMPORTANT: The fields of this structure MUST be stored using the specified
54 * byte order indicated below, as the structure is exchanged between nodes
55 * as part of a link setup process.
56 */
57struct tipc_media_addr {
58 __be32 type; /* bearer type (network byte order) */
59 union {
60 __u8 eth_addr[6]; /* 48 bit Ethernet addr (byte array) */
61 } dev_addr;
62};
63
64/**
65 * struct tipc_bearer - TIPC bearer info available to media code
66 * @usr_handle: pointer to additional media-specific information about bearer
67 * @mtu: max packet size bearer can support
68 * @blocked: non-zero if bearer is blocked
69 * @lock: spinlock for controlling access to bearer
70 * @addr: media-specific address associated with bearer
71 * @name: bearer name (format = media:interface)
72 *
73 * Note: TIPC initializes "name" and "lock" fields; media code is responsible
74 * for initialization all other fields when a bearer is enabled.
75 */
76struct tipc_bearer {
77 void *usr_handle;
78 u32 mtu;
79 int blocked;
80 spinlock_t lock;
81 struct tipc_media_addr addr;
82 char name[TIPC_MAX_BEARER_NAME];
83};
46 84
47/** 85/**
48 * struct media - TIPC media information available to internal users 86 * struct media - TIPC media information available to internal users
@@ -55,7 +93,7 @@
55 * @priority: default link (and bearer) priority 93 * @priority: default link (and bearer) priority
56 * @tolerance: default time (in ms) before declaring link failure 94 * @tolerance: default time (in ms) before declaring link failure
57 * @window: default window (in packets) before declaring link congestion 95 * @window: default window (in packets) before declaring link congestion
58 * @type_id: TIPC media identifier [defined in tipc_bearer.h] 96 * @type_id: TIPC media identifier
59 * @name: media name 97 * @name: media name
60 */ 98 */
61 99
@@ -116,6 +154,34 @@ struct link;
116 154
117extern struct bearer tipc_bearers[]; 155extern struct bearer tipc_bearers[];
118 156
157/*
158 * TIPC routines available to supported media types
159 */
160int tipc_register_media(u32 media_type,
161 char *media_name, int (*enable)(struct tipc_bearer *),
162 void (*disable)(struct tipc_bearer *),
163 int (*send_msg)(struct sk_buff *,
164 struct tipc_bearer *, struct tipc_media_addr *),
165 char *(*addr2str)(struct tipc_media_addr *a,
166 char *str_buf, int str_size),
167 struct tipc_media_addr *bcast_addr, const u32 bearer_priority,
168 const u32 link_tolerance, /* [ms] */
169 const u32 send_window_limit);
170
171void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
172
173int tipc_block_bearer(const char *name);
174void tipc_continue(struct tipc_bearer *tb_ptr);
175
176int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
177int tipc_disable_bearer(const char *name);
178
179/*
180 * Routines made available to TIPC by supported media types
181 */
182int tipc_eth_media_start(void);
183void tipc_eth_media_stop(void);
184
119void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a); 185void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
120struct sk_buff *tipc_media_get_names(void); 186struct sk_buff *tipc_media_get_names(void);
121 187
@@ -126,7 +192,6 @@ void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
126struct bearer *tipc_bearer_find_interface(const char *if_name); 192struct bearer *tipc_bearer_find_interface(const char *if_name);
127int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr); 193int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
128int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr); 194int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr);
129int tipc_bearer_init(void);
130void tipc_bearer_stop(void); 195void tipc_bearer_stop(void);
131void tipc_bearer_lock_push(struct bearer *b_ptr); 196void tipc_bearer_lock_push(struct bearer *b_ptr);
132 197
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 7fea14b98b9..405be87157b 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -36,17 +36,10 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "cluster.h" 38#include "cluster.h"
39#include "addr.h"
40#include "node_subscr.h"
41#include "link.h" 39#include "link.h"
42#include "node.h"
43#include "net.h"
44#include "msg.h"
45#include "bearer.h"
46 40
47static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf, 41static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
48 u32 lower, u32 upper); 42 u32 lower, u32 upper);
49static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest);
50 43
51struct tipc_node **tipc_local_nodes = NULL; 44struct tipc_node **tipc_local_nodes = NULL;
52struct tipc_node_map tipc_cltr_bcast_nodes = {0,{0,}}; 45struct tipc_node_map tipc_cltr_bcast_nodes = {0,{0,}};
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 50a6133a366..bdde39f0436 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -35,23 +35,11 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "bearer.h"
40#include "port.h" 38#include "port.h"
41#include "link.h" 39#include "link.h"
42#include "zone.h"
43#include "addr.h"
44#include "name_table.h" 40#include "name_table.h"
45#include "node.h" 41#include "user_reg.h"
46#include "config.h" 42#include "config.h"
47#include "discover.h"
48
49struct subscr_data {
50 char usr_handle[8];
51 u32 domain;
52 u32 port_ref;
53 struct list_head subd_list;
54};
55 43
56struct manager { 44struct manager {
57 u32 user_ref; 45 u32 user_ref;
@@ -572,7 +560,7 @@ int tipc_cfg_init(void)
572 struct tipc_name_seq seq; 560 struct tipc_name_seq seq;
573 int res; 561 int res;
574 562
575 res = tipc_attach(&mng.user_ref, NULL, NULL); 563 res = tipc_attach(&mng.user_ref);
576 if (res) 564 if (res)
577 goto failed; 565 goto failed;
578 566
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 481e12ece71..443159a166f 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -39,7 +39,6 @@
39 39
40/* ---------------------------------------------------------------------- */ 40/* ---------------------------------------------------------------------- */
41 41
42#include "core.h"
43#include "link.h" 42#include "link.h"
44 43
45struct sk_buff *tipc_cfg_reply_alloc(int payload_size); 44struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index e2a09eb8efd..f5d62c174de 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -40,7 +40,6 @@
40#include <linux/random.h> 40#include <linux/random.h>
41 41
42#include "core.h" 42#include "core.h"
43#include "dbg.h"
44#include "ref.h" 43#include "ref.h"
45#include "net.h" 44#include "net.h"
46#include "user_reg.h" 45#include "user_reg.h"
@@ -236,43 +235,3 @@ module_exit(tipc_exit);
236MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication"); 235MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
237MODULE_LICENSE("Dual BSD/GPL"); 236MODULE_LICENSE("Dual BSD/GPL");
238MODULE_VERSION(TIPC_MOD_VER); 237MODULE_VERSION(TIPC_MOD_VER);
239
240/* Native TIPC API for kernel-space applications (see tipc.h) */
241
242EXPORT_SYMBOL(tipc_attach);
243EXPORT_SYMBOL(tipc_detach);
244EXPORT_SYMBOL(tipc_createport);
245EXPORT_SYMBOL(tipc_deleteport);
246EXPORT_SYMBOL(tipc_ownidentity);
247EXPORT_SYMBOL(tipc_portimportance);
248EXPORT_SYMBOL(tipc_set_portimportance);
249EXPORT_SYMBOL(tipc_portunreliable);
250EXPORT_SYMBOL(tipc_set_portunreliable);
251EXPORT_SYMBOL(tipc_portunreturnable);
252EXPORT_SYMBOL(tipc_set_portunreturnable);
253EXPORT_SYMBOL(tipc_publish);
254EXPORT_SYMBOL(tipc_withdraw);
255EXPORT_SYMBOL(tipc_connect2port);
256EXPORT_SYMBOL(tipc_disconnect);
257EXPORT_SYMBOL(tipc_shutdown);
258EXPORT_SYMBOL(tipc_send);
259EXPORT_SYMBOL(tipc_send2name);
260EXPORT_SYMBOL(tipc_send2port);
261EXPORT_SYMBOL(tipc_multicast);
262
263/* TIPC API for external bearers (see tipc_bearer.h) */
264
265EXPORT_SYMBOL(tipc_block_bearer);
266EXPORT_SYMBOL(tipc_continue);
267EXPORT_SYMBOL(tipc_disable_bearer);
268EXPORT_SYMBOL(tipc_enable_bearer);
269EXPORT_SYMBOL(tipc_recv_msg);
270EXPORT_SYMBOL(tipc_register_media);
271
272/* TIPC API for external APIs (see tipc_port.h) */
273
274EXPORT_SYMBOL(tipc_createport_raw);
275EXPORT_SYMBOL(tipc_reject_msg);
276EXPORT_SYMBOL(tipc_send_buf_fast);
277EXPORT_SYMBOL(tipc_acknowledge);
278
diff --git a/net/tipc/core.h b/net/tipc/core.h
index e19389e5722..ca7e171c104 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -39,10 +39,6 @@
39 39
40#include <linux/tipc.h> 40#include <linux/tipc.h>
41#include <linux/tipc_config.h> 41#include <linux/tipc_config.h>
42#include <net/tipc/tipc_msg.h>
43#include <net/tipc/tipc_port.h>
44#include <net/tipc/tipc_bearer.h>
45#include <net/tipc/tipc.h>
46#include <linux/types.h> 42#include <linux/types.h>
47#include <linux/kernel.h> 43#include <linux/kernel.h>
48#include <linux/errno.h> 44#include <linux/errno.h>
@@ -62,6 +58,9 @@
62 58
63#define TIPC_MOD_VER "2.0.0" 59#define TIPC_MOD_VER "2.0.0"
64 60
61struct tipc_msg; /* msg.h */
62struct print_buf; /* dbg.h */
63
65/* 64/*
66 * TIPC sanity test macros 65 * TIPC sanity test macros
67 */ 66 */
@@ -174,6 +173,13 @@ void tipc_dump_dbg(struct print_buf *, const char *fmt, ...);
174#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */ 173#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
175 174
176/* 175/*
176 * TIPC operating mode routines
177 */
178#define TIPC_NOT_RUNNING 0
179#define TIPC_NODE_MODE 1
180#define TIPC_NET_MODE 2
181
182/*
177 * Global configuration variables 183 * Global configuration variables
178 */ 184 */
179 185
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 4a7cd3719b7..f2ce36baf42 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -35,9 +35,7 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "link.h" 38#include "link.h"
40#include "zone.h"
41#include "discover.h" 39#include "discover.h"
42#include "port.h" 40#include "port.h"
43#include "name_table.h" 41#include "name_table.h"
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index f8e75063612..d2c3cffb79f 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -37,8 +37,6 @@
37#ifndef _TIPC_DISCOVER_H 37#ifndef _TIPC_DISCOVER_H
38#define _TIPC_DISCOVER_H 38#define _TIPC_DISCOVER_H
39 39
40#include "core.h"
41
42struct link_req; 40struct link_req;
43 41
44struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr, 42struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 6e988ba485f..ee683cc8f4b 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -34,13 +34,13 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#include <net/tipc/tipc.h>
38#include <net/tipc/tipc_bearer.h>
39#include <net/tipc/tipc_msg.h>
40#include <linux/netdevice.h> 37#include <linux/netdevice.h>
41#include <linux/slab.h> 38#include <linux/slab.h>
42#include <net/net_namespace.h> 39#include <net/net_namespace.h>
43 40
41#include "core.h"
42#include "bearer.h"
43
44#define MAX_ETH_BEARERS 2 44#define MAX_ETH_BEARERS 2
45#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI 45#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI
46#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL 46#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b31992ccd5d..cf414cf05e7 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -35,19 +35,11 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "link.h" 38#include "link.h"
40#include "net.h"
41#include "node.h"
42#include "port.h" 39#include "port.h"
43#include "addr.h"
44#include "node_subscr.h"
45#include "name_distr.h" 40#include "name_distr.h"
46#include "bearer.h"
47#include "name_table.h"
48#include "discover.h" 41#include "discover.h"
49#include "config.h" 42#include "config.h"
50#include "bcast.h"
51 43
52 44
53/* 45/*
@@ -57,12 +49,6 @@
57#define INVALID_SESSION 0x10000 49#define INVALID_SESSION 0x10000
58 50
59/* 51/*
60 * Limit for deferred reception queue:
61 */
62
63#define DEF_QUEUE_LIMIT 256u
64
65/*
66 * Link state events: 52 * Link state events:
67 */ 53 */
68 54
diff --git a/net/tipc/link.h b/net/tipc/link.h
index f98bc613de6..c562888d25d 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -39,7 +39,6 @@
39 39
40#include "dbg.h" 40#include "dbg.h"
41#include "msg.h" 41#include "msg.h"
42#include "bearer.h"
43#include "node.h" 42#include "node.h"
44 43
45#define PUSH_FAILED 1 44#define PUSH_FAILED 1
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index ecb532fb035..ee6b4c68d4a 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -36,9 +36,7 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "addr.h" 38#include "addr.h"
39#include "dbg.h"
40#include "msg.h" 39#include "msg.h"
41#include "bearer.h"
42 40
43u32 tipc_msg_tot_importance(struct tipc_msg *m) 41u32 tipc_msg_tot_importance(struct tipc_msg *m)
44{ 42{
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 031aad18efc..aee53864d7a 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -37,10 +37,51 @@
37#ifndef _TIPC_MSG_H 37#ifndef _TIPC_MSG_H
38#define _TIPC_MSG_H 38#define _TIPC_MSG_H
39 39
40#include "core.h" 40#include "bearer.h"
41 41
42#define TIPC_VERSION 2 42#define TIPC_VERSION 2
43 43
44/*
45 * TIPC user data message header format, version 2:
46 *
47 *
48 * 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
49 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
50 * w0:|vers | user |hdr sz |n|d|s|-| message size |
51 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
52 * w1:|mstyp| error |rer cnt|lsc|opt p| broadcast ack no |
53 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
54 * w2:| link level ack no | broadcast/link level seq no |
55 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
56 * w3:| previous node |
57 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
58 * w4:| originating port |
59 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
60 * w5:| destination port |
61 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
62 * w6:| originating node |
63 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
64 * w7:| destination node |
65 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
66 * w8:| name type / transport sequence number |
67 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
68 * w9:| name instance/multicast lower bound |
69 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
70 * wA:| multicast upper bound |
71 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72 * / /
73 * \ options \
74 * / /
75 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
76 *
77 */
78
79#define TIPC_CONN_MSG 0
80#define TIPC_MCAST_MSG 1
81#define TIPC_NAMED_MSG 2
82#define TIPC_DIRECT_MSG 3
83
84
44#define SHORT_H_SIZE 24 /* Connected, in-cluster messages */ 85#define SHORT_H_SIZE 24 /* Connected, in-cluster messages */
45#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */ 86#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */
46#define LONG_H_SIZE 40 /* Named messages */ 87#define LONG_H_SIZE 40 /* Named messages */
@@ -52,20 +93,26 @@
52#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE) 93#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
53 94
54 95
55/* 96struct tipc_msg {
56 TIPC user data message header format, version 2 97 __be32 hdr[15];
98};
57 99
58 - Fundamental definitions available to privileged TIPC users
59 are located in tipc_msg.h.
60 - Remaining definitions available to TIPC internal users appear below.
61*/
62 100
101static inline u32 msg_word(struct tipc_msg *m, u32 pos)
102{
103 return ntohl(m->hdr[pos]);
104}
63 105
64static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val) 106static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val)
65{ 107{
66 m->hdr[w] = htonl(val); 108 m->hdr[w] = htonl(val);
67} 109}
68 110
111static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
112{
113 return (msg_word(m, w) >> pos) & mask;
114}
115
69static inline void msg_set_bits(struct tipc_msg *m, u32 w, 116static inline void msg_set_bits(struct tipc_msg *m, u32 w,
70 u32 pos, u32 mask, u32 val) 117 u32 pos, u32 mask, u32 val)
71{ 118{
@@ -112,16 +159,36 @@ static inline void msg_set_user(struct tipc_msg *m, u32 n)
112 msg_set_bits(m, 0, 25, 0xf, n); 159 msg_set_bits(m, 0, 25, 0xf, n);
113} 160}
114 161
162static inline u32 msg_importance(struct tipc_msg *m)
163{
164 return msg_bits(m, 0, 25, 0xf);
165}
166
115static inline void msg_set_importance(struct tipc_msg *m, u32 i) 167static inline void msg_set_importance(struct tipc_msg *m, u32 i)
116{ 168{
117 msg_set_user(m, i); 169 msg_set_user(m, i);
118} 170}
119 171
172static inline u32 msg_hdr_sz(struct tipc_msg *m)
173{
174 return msg_bits(m, 0, 21, 0xf) << 2;
175}
176
120static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n) 177static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n)
121{ 178{
122 msg_set_bits(m, 0, 21, 0xf, n>>2); 179 msg_set_bits(m, 0, 21, 0xf, n>>2);
123} 180}
124 181
182static inline u32 msg_size(struct tipc_msg *m)
183{
184 return msg_bits(m, 0, 0, 0x1ffff);
185}
186
187static inline u32 msg_data_sz(struct tipc_msg *m)
188{
189 return msg_size(m) - msg_hdr_sz(m);
190}
191
125static inline int msg_non_seq(struct tipc_msg *m) 192static inline int msg_non_seq(struct tipc_msg *m)
126{ 193{
127 return msg_bits(m, 0, 20, 1); 194 return msg_bits(m, 0, 20, 1);
@@ -162,11 +229,36 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz)
162 * Word 1 229 * Word 1
163 */ 230 */
164 231
232static inline u32 msg_type(struct tipc_msg *m)
233{
234 return msg_bits(m, 1, 29, 0x7);
235}
236
165static inline void msg_set_type(struct tipc_msg *m, u32 n) 237static inline void msg_set_type(struct tipc_msg *m, u32 n)
166{ 238{
167 msg_set_bits(m, 1, 29, 0x7, n); 239 msg_set_bits(m, 1, 29, 0x7, n);
168} 240}
169 241
242static inline u32 msg_named(struct tipc_msg *m)
243{
244 return msg_type(m) == TIPC_NAMED_MSG;
245}
246
247static inline u32 msg_mcast(struct tipc_msg *m)
248{
249 return msg_type(m) == TIPC_MCAST_MSG;
250}
251
252static inline u32 msg_connected(struct tipc_msg *m)
253{
254 return msg_type(m) == TIPC_CONN_MSG;
255}
256
257static inline u32 msg_errcode(struct tipc_msg *m)
258{
259 return msg_bits(m, 1, 25, 0xf);
260}
261
170static inline void msg_set_errcode(struct tipc_msg *m, u32 err) 262static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
171{ 263{
172 msg_set_bits(m, 1, 25, 0xf, err); 264 msg_set_bits(m, 1, 25, 0xf, err);
@@ -257,31 +349,68 @@ static inline void msg_set_destnode_cache(struct tipc_msg *m, u32 dnode)
257 */ 349 */
258 350
259 351
352static inline u32 msg_prevnode(struct tipc_msg *m)
353{
354 return msg_word(m, 3);
355}
356
260static inline void msg_set_prevnode(struct tipc_msg *m, u32 a) 357static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
261{ 358{
262 msg_set_word(m, 3, a); 359 msg_set_word(m, 3, a);
263} 360}
264 361
362static inline u32 msg_origport(struct tipc_msg *m)
363{
364 return msg_word(m, 4);
365}
366
265static inline void msg_set_origport(struct tipc_msg *m, u32 p) 367static inline void msg_set_origport(struct tipc_msg *m, u32 p)
266{ 368{
267 msg_set_word(m, 4, p); 369 msg_set_word(m, 4, p);
268} 370}
269 371
372static inline u32 msg_destport(struct tipc_msg *m)
373{
374 return msg_word(m, 5);
375}
376
270static inline void msg_set_destport(struct tipc_msg *m, u32 p) 377static inline void msg_set_destport(struct tipc_msg *m, u32 p)
271{ 378{
272 msg_set_word(m, 5, p); 379 msg_set_word(m, 5, p);
273} 380}
274 381
382static inline u32 msg_mc_netid(struct tipc_msg *m)
383{
384 return msg_word(m, 5);
385}
386
275static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p) 387static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
276{ 388{
277 msg_set_word(m, 5, p); 389 msg_set_word(m, 5, p);
278} 390}
279 391
392static inline int msg_short(struct tipc_msg *m)
393{
394 return msg_hdr_sz(m) == 24;
395}
396
397static inline u32 msg_orignode(struct tipc_msg *m)
398{
399 if (likely(msg_short(m)))
400 return msg_prevnode(m);
401 return msg_word(m, 6);
402}
403
280static inline void msg_set_orignode(struct tipc_msg *m, u32 a) 404static inline void msg_set_orignode(struct tipc_msg *m, u32 a)
281{ 405{
282 msg_set_word(m, 6, a); 406 msg_set_word(m, 6, a);
283} 407}
284 408
409static inline u32 msg_destnode(struct tipc_msg *m)
410{
411 return msg_word(m, 7);
412}
413
285static inline void msg_set_destnode(struct tipc_msg *m, u32 a) 414static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
286{ 415{
287 msg_set_word(m, 7, a); 416 msg_set_word(m, 7, a);
@@ -299,6 +428,11 @@ static inline u32 msg_routed(struct tipc_msg *m)
299 return(msg_destnode(m) ^ msg_orignode(m)) >> 11; 428 return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
300} 429}
301 430
431static inline u32 msg_nametype(struct tipc_msg *m)
432{
433 return msg_word(m, 8);
434}
435
302static inline void msg_set_nametype(struct tipc_msg *m, u32 n) 436static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
303{ 437{
304 msg_set_word(m, 8, n); 438 msg_set_word(m, 8, n);
@@ -324,6 +458,16 @@ static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
324 msg_set_word(m, 8, n); 458 msg_set_word(m, 8, n);
325} 459}
326 460
461static inline u32 msg_nameinst(struct tipc_msg *m)
462{
463 return msg_word(m, 9);
464}
465
466static inline u32 msg_namelower(struct tipc_msg *m)
467{
468 return msg_nameinst(m);
469}
470
327static inline void msg_set_namelower(struct tipc_msg *m, u32 n) 471static inline void msg_set_namelower(struct tipc_msg *m, u32 n)
328{ 472{
329 msg_set_word(m, 9, n); 473 msg_set_word(m, 9, n);
@@ -334,11 +478,21 @@ static inline void msg_set_nameinst(struct tipc_msg *m, u32 n)
334 msg_set_namelower(m, n); 478 msg_set_namelower(m, n);
335} 479}
336 480
481static inline u32 msg_nameupper(struct tipc_msg *m)
482{
483 return msg_word(m, 10);
484}
485
337static inline void msg_set_nameupper(struct tipc_msg *m, u32 n) 486static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
338{ 487{
339 msg_set_word(m, 10, n); 488 msg_set_word(m, 10, n);
340} 489}
341 490
491static inline unchar *msg_data(struct tipc_msg *m)
492{
493 return ((unchar *)m) + msg_hdr_sz(m);
494}
495
342static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) 496static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
343{ 497{
344 return (struct tipc_msg *)msg_data(m); 498 return (struct tipc_msg *)msg_data(m);
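The read-side accessors added above all funnel through msg_bits(), which treats each 32-bit header word as a packed bit-field: the header size sits in bits 21-24 of word 0 in units of four bytes (hence the <<2 on read and >>2 on write), the total message size in bits 0-16, and msg_data_sz() is simply their difference. A minimal userspace illustration of that packing, with stand-in helpers rather than the real msg_bits()/msg_set_bits() (which additionally handle network byte order):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for TIPC's msg_bits()/msg_set_bits(); byte-order handling
 * is deliberately omitted for this illustration. */
static uint32_t get_bits(uint32_t word, unsigned pos, uint32_t mask)
{
        return (word >> pos) & mask;
}

static void set_bits(uint32_t *word, unsigned pos, uint32_t mask, uint32_t val)
{
        *word &= ~(mask << pos);
        *word |= (val & mask) << pos;
}

int main(void)
{
        uint32_t w0 = 0;

        set_bits(&w0, 21, 0xf, 24 >> 2);        /* like msg_set_hdr_sz(m, 24) */
        set_bits(&w0, 0, 0x1ffff, 1000);        /* total size, cf. msg_size() */

        assert((get_bits(w0, 21, 0xf) << 2) == 24);     /* msg_hdr_sz() */
        assert(get_bits(w0, 0, 0x1ffff) == 1000);       /* msg_size()   */

        /* msg_data_sz() = msg_size() - msg_hdr_sz() */
        printf("data size = %u\n",
               (unsigned)(get_bits(w0, 0, 0x1ffff) - (get_bits(w0, 21, 0xf) << 2)));
        return 0;
}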
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 7b907171f87..10ff48be3c0 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -36,9 +36,7 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "cluster.h" 38#include "cluster.h"
39#include "dbg.h"
40#include "link.h" 39#include "link.h"
41#include "msg.h"
42#include "name_distr.h" 40#include "name_distr.h"
43 41
44#define ITEM_SIZE sizeof(struct distr_item) 42#define ITEM_SIZE sizeof(struct distr_item)
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 3a8de4334da..d5adb045674 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -36,15 +36,10 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "config.h" 38#include "config.h"
39#include "dbg.h"
40#include "name_table.h" 39#include "name_table.h"
41#include "name_distr.h" 40#include "name_distr.h"
42#include "addr.h"
43#include "node_subscr.h"
44#include "subscr.h" 41#include "subscr.h"
45#include "port.h" 42#include "port.h"
46#include "cluster.h"
47#include "bcast.h"
48 43
49static int tipc_nametbl_size = 1024; /* must be a power of 2 */ 44static int tipc_nametbl_size = 1024; /* must be a power of 2 */
50 45
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 1a621cfd660..c2b4b86c2e6 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -35,18 +35,13 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "bearer.h"
39#include "net.h" 38#include "net.h"
40#include "zone.h" 39#include "zone.h"
41#include "addr.h"
42#include "name_table.h" 40#include "name_table.h"
43#include "name_distr.h" 41#include "name_distr.h"
44#include "subscr.h" 42#include "subscr.h"
45#include "link.h" 43#include "link.h"
46#include "msg.h"
47#include "port.h" 44#include "port.h"
48#include "bcast.h"
49#include "discover.h"
50#include "config.h" 45#include "config.h"
51 46
52/* 47/*
diff --git a/net/tipc/node.c b/net/tipc/node.c
index b4d87eb2dc5..df71dfc3a9a 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -37,16 +37,9 @@
37#include "core.h" 37#include "core.h"
38#include "config.h" 38#include "config.h"
39#include "node.h" 39#include "node.h"
40#include "cluster.h"
41#include "net.h"
42#include "addr.h"
43#include "node_subscr.h"
44#include "link.h"
45#include "port.h" 40#include "port.h"
46#include "bearer.h"
47#include "name_distr.h" 41#include "name_distr.h"
48 42
49void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str);
50static void node_lost_contact(struct tipc_node *n_ptr); 43static void node_lost_contact(struct tipc_node *n_ptr);
51static void node_established_contact(struct tipc_node *n_ptr); 44static void node_established_contact(struct tipc_node *n_ptr);
52 45
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 19194d476a9..018a55332d9 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -35,10 +35,8 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "node_subscr.h" 38#include "node_subscr.h"
40#include "node.h" 39#include "node.h"
41#include "addr.h"
42 40
43/** 41/**
44 * tipc_nodesub_subscribe - create "node down" subscription for specified node 42 * tipc_nodesub_subscribe - create "node down" subscription for specified node
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 82092eaa153..7873283f496 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -36,15 +36,9 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "config.h" 38#include "config.h"
39#include "dbg.h"
40#include "port.h" 39#include "port.h"
41#include "addr.h"
42#include "link.h"
43#include "node.h"
44#include "name_table.h" 40#include "name_table.h"
45#include "user_reg.h" 41#include "user_reg.h"
46#include "msg.h"
47#include "bcast.h"
48 42
49/* Connection management: */ 43/* Connection management: */
50#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */ 44#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */
@@ -94,7 +88,7 @@ static void port_incr_out_seqno(struct port *p_ptr)
94 * tipc_multicast - send a multicast message to local and remote destinations 88 * tipc_multicast - send a multicast message to local and remote destinations
95 */ 89 */
96 90
97int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain, 91int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
98 u32 num_sect, struct iovec const *msg_sect) 92 u32 num_sect, struct iovec const *msg_sect)
99{ 93{
100 struct tipc_msg *hdr; 94 struct tipc_msg *hdr;
@@ -989,13 +983,6 @@ int tipc_createport(u32 user_ref,
989 return 0; 983 return 0;
990} 984}
991 985
992int tipc_ownidentity(u32 ref, struct tipc_portid *id)
993{
994 id->ref = ref;
995 id->node = tipc_own_addr;
996 return 0;
997}
998
999int tipc_portimportance(u32 ref, unsigned int *importance) 986int tipc_portimportance(u32 ref, unsigned int *importance)
1000{ 987{
1001 struct port *p_ptr; 988 struct port *p_ptr;
@@ -1271,16 +1258,11 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1271} 1258}
1272 1259
1273/** 1260/**
1274 * tipc_forward2name - forward message sections to port name 1261 * tipc_send2name - send message sections to port name
1275 */ 1262 */
1276 1263
1277static int tipc_forward2name(u32 ref, 1264int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1278 struct tipc_name const *name, 1265 unsigned int num_sect, struct iovec const *msg_sect)
1279 u32 domain,
1280 u32 num_sect,
1281 struct iovec const *msg_sect,
1282 struct tipc_portid const *orig,
1283 unsigned int importance)
1284{ 1266{
1285 struct port *p_ptr; 1267 struct port *p_ptr;
1286 struct tipc_msg *msg; 1268 struct tipc_msg *msg;
@@ -1294,14 +1276,12 @@ static int tipc_forward2name(u32 ref,
1294 1276
1295 msg = &p_ptr->publ.phdr; 1277 msg = &p_ptr->publ.phdr;
1296 msg_set_type(msg, TIPC_NAMED_MSG); 1278 msg_set_type(msg, TIPC_NAMED_MSG);
1297 msg_set_orignode(msg, orig->node); 1279 msg_set_orignode(msg, tipc_own_addr);
1298 msg_set_origport(msg, orig->ref); 1280 msg_set_origport(msg, ref);
1299 msg_set_hdr_sz(msg, LONG_H_SIZE); 1281 msg_set_hdr_sz(msg, LONG_H_SIZE);
1300 msg_set_nametype(msg, name->type); 1282 msg_set_nametype(msg, name->type);
1301 msg_set_nameinst(msg, name->instance); 1283 msg_set_nameinst(msg, name->instance);
1302 msg_set_lookup_scope(msg, tipc_addr_scope(domain)); 1284 msg_set_lookup_scope(msg, tipc_addr_scope(domain));
1303 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1304 msg_set_importance(msg,importance);
1305 destport = tipc_nametbl_translate(name->type, name->instance, &destnode); 1285 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1306 msg_set_destnode(msg, destnode); 1286 msg_set_destnode(msg, destnode);
1307 msg_set_destport(msg, destport); 1287 msg_set_destport(msg, destport);
@@ -1325,33 +1305,11 @@ static int tipc_forward2name(u32 ref,
1325} 1305}
1326 1306
1327/** 1307/**
1328 * tipc_send2name - send message sections to port name 1308 * tipc_send2port - send message sections to port identity
1329 */
1330
1331int tipc_send2name(u32 ref,
1332 struct tipc_name const *name,
1333 unsigned int domain,
1334 unsigned int num_sect,
1335 struct iovec const *msg_sect)
1336{
1337 struct tipc_portid orig;
1338
1339 orig.ref = ref;
1340 orig.node = tipc_own_addr;
1341 return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
1342 TIPC_PORT_IMPORTANCE);
1343}
1344
1345/**
1346 * tipc_forward2port - forward message sections to port identity
1347 */ 1309 */
1348 1310
1349static int tipc_forward2port(u32 ref, 1311int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1350 struct tipc_portid const *dest, 1312 unsigned int num_sect, struct iovec const *msg_sect)
1351 unsigned int num_sect,
1352 struct iovec const *msg_sect,
1353 struct tipc_portid const *orig,
1354 unsigned int importance)
1355{ 1313{
1356 struct port *p_ptr; 1314 struct port *p_ptr;
1357 struct tipc_msg *msg; 1315 struct tipc_msg *msg;
@@ -1363,13 +1321,11 @@ static int tipc_forward2port(u32 ref,
1363 1321
1364 msg = &p_ptr->publ.phdr; 1322 msg = &p_ptr->publ.phdr;
1365 msg_set_type(msg, TIPC_DIRECT_MSG); 1323 msg_set_type(msg, TIPC_DIRECT_MSG);
1366 msg_set_orignode(msg, orig->node); 1324 msg_set_orignode(msg, tipc_own_addr);
1367 msg_set_origport(msg, orig->ref); 1325 msg_set_origport(msg, ref);
1368 msg_set_destnode(msg, dest->node); 1326 msg_set_destnode(msg, dest->node);
1369 msg_set_destport(msg, dest->ref); 1327 msg_set_destport(msg, dest->ref);
1370 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE); 1328 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1371 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1372 msg_set_importance(msg, importance);
1373 p_ptr->sent++; 1329 p_ptr->sent++;
1374 if (dest->node == tipc_own_addr) 1330 if (dest->node == tipc_own_addr)
1375 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect); 1331 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
@@ -1384,31 +1340,11 @@ static int tipc_forward2port(u32 ref,
1384} 1340}
1385 1341
1386/** 1342/**
1387 * tipc_send2port - send message sections to port identity 1343 * tipc_send_buf2port - send message buffer to port identity
1388 */ 1344 */
1389 1345
1390int tipc_send2port(u32 ref, 1346int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
1391 struct tipc_portid const *dest, 1347 struct sk_buff *buf, unsigned int dsz)
1392 unsigned int num_sect,
1393 struct iovec const *msg_sect)
1394{
1395 struct tipc_portid orig;
1396
1397 orig.ref = ref;
1398 orig.node = tipc_own_addr;
1399 return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
1400 TIPC_PORT_IMPORTANCE);
1401}
1402
1403/**
1404 * tipc_forward_buf2port - forward message buffer to port identity
1405 */
1406static int tipc_forward_buf2port(u32 ref,
1407 struct tipc_portid const *dest,
1408 struct sk_buff *buf,
1409 unsigned int dsz,
1410 struct tipc_portid const *orig,
1411 unsigned int importance)
1412{ 1348{
1413 struct port *p_ptr; 1349 struct port *p_ptr;
1414 struct tipc_msg *msg; 1350 struct tipc_msg *msg;
@@ -1420,13 +1356,11 @@ static int tipc_forward_buf2port(u32 ref,
1420 1356
1421 msg = &p_ptr->publ.phdr; 1357 msg = &p_ptr->publ.phdr;
1422 msg_set_type(msg, TIPC_DIRECT_MSG); 1358 msg_set_type(msg, TIPC_DIRECT_MSG);
1423 msg_set_orignode(msg, orig->node); 1359 msg_set_orignode(msg, tipc_own_addr);
1424 msg_set_origport(msg, orig->ref); 1360 msg_set_origport(msg, ref);
1425 msg_set_destnode(msg, dest->node); 1361 msg_set_destnode(msg, dest->node);
1426 msg_set_destport(msg, dest->ref); 1362 msg_set_destport(msg, dest->ref);
1427 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE); 1363 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1428 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1429 msg_set_importance(msg, importance);
1430 msg_set_size(msg, DIR_MSG_H_SIZE + dsz); 1364 msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
1431 if (skb_cow(buf, DIR_MSG_H_SIZE)) 1365 if (skb_cow(buf, DIR_MSG_H_SIZE))
1432 return -ENOMEM; 1366 return -ENOMEM;
@@ -1445,20 +1379,3 @@ static int tipc_forward_buf2port(u32 ref,
1445 return -ELINKCONG; 1379 return -ELINKCONG;
1446} 1380}
1447 1381
1448/**
1449 * tipc_send_buf2port - send message buffer to port identity
1450 */
1451
1452int tipc_send_buf2port(u32 ref,
1453 struct tipc_portid const *dest,
1454 struct sk_buff *buf,
1455 unsigned int dsz)
1456{
1457 struct tipc_portid orig;
1458
1459 orig.ref = ref;
1460 orig.node = tipc_own_addr;
1461 return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
1462 TIPC_PORT_IMPORTANCE);
1463}
1464
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 73bbf442b34..3a807fcec2b 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -37,13 +37,44 @@
37#ifndef _TIPC_PORT_H 37#ifndef _TIPC_PORT_H
38#define _TIPC_PORT_H 38#define _TIPC_PORT_H
39 39
40#include "core.h"
41#include "ref.h" 40#include "ref.h"
42#include "net.h" 41#include "net.h"
43#include "msg.h" 42#include "msg.h"
44#include "dbg.h"
45#include "node_subscr.h" 43#include "node_subscr.h"
46 44
45#define TIPC_FLOW_CONTROL_WIN 512
46
47typedef void (*tipc_msg_err_event) (void *usr_handle, u32 portref,
48 struct sk_buff **buf, unsigned char const *data,
49 unsigned int size, int reason,
50 struct tipc_portid const *attmpt_destid);
51
52typedef void (*tipc_named_msg_err_event) (void *usr_handle, u32 portref,
53 struct sk_buff **buf, unsigned char const *data,
54 unsigned int size, int reason,
55 struct tipc_name_seq const *attmpt_dest);
56
57typedef void (*tipc_conn_shutdown_event) (void *usr_handle, u32 portref,
58 struct sk_buff **buf, unsigned char const *data,
59 unsigned int size, int reason);
60
61typedef void (*tipc_msg_event) (void *usr_handle, u32 portref,
62 struct sk_buff **buf, unsigned char const *data,
63 unsigned int size, unsigned int importance,
64 struct tipc_portid const *origin);
65
66typedef void (*tipc_named_msg_event) (void *usr_handle, u32 portref,
67 struct sk_buff **buf, unsigned char const *data,
68 unsigned int size, unsigned int importance,
69 struct tipc_portid const *orig,
70 struct tipc_name_seq const *dest);
71
72typedef void (*tipc_conn_msg_event) (void *usr_handle, u32 portref,
73 struct sk_buff **buf, unsigned char const *data,
74 unsigned int size);
75
76typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
77
47/** 78/**
48 * struct user_port - TIPC user port (used with native API) 79 * struct user_port - TIPC user port (used with native API)
49 * @user_ref: id of user who created user port 80 * @user_ref: id of user who created user port
@@ -68,6 +99,34 @@ struct user_port {
68}; 99};
69 100
70/** 101/**
102 * struct tipc_port - TIPC port info available to socket API
103 * @usr_handle: pointer to additional user-defined information about port
104 * @lock: pointer to spinlock for controlling access to port
105 * @connected: non-zero if port is currently connected to a peer port
106 * @conn_type: TIPC type used when connection was established
107 * @conn_instance: TIPC instance used when connection was established
108 * @conn_unacked: number of unacknowledged messages received from peer port
109 * @published: non-zero if port has one or more associated names
110 * @congested: non-zero if cannot send because of link or port congestion
111 * @max_pkt: maximum packet size "hint" used when building messages sent by port
112 * @ref: unique reference to port in TIPC object registry
113 * @phdr: preformatted message header used when sending messages
114 */
115struct tipc_port {
116 void *usr_handle;
117 spinlock_t *lock;
118 int connected;
119 u32 conn_type;
120 u32 conn_instance;
121 u32 conn_unacked;
122 int published;
123 u32 congested;
124 u32 max_pkt;
125 u32 ref;
126 struct tipc_msg phdr;
127};
128
129/**
71 * struct port - TIPC port structure 130 * struct port - TIPC port structure
72 * @publ: TIPC port info available to privileged users 131 * @publ: TIPC port info available to privileged users
73 * @port_list: adjacent ports in TIPC's global list of ports 132 * @port_list: adjacent ports in TIPC's global list of ports
@@ -109,11 +168,76 @@ struct port {
109extern spinlock_t tipc_port_list_lock; 168extern spinlock_t tipc_port_list_lock;
110struct port_list; 169struct port_list;
111 170
171/*
172 * TIPC port manipulation routines
173 */
174struct tipc_port *tipc_createport_raw(void *usr_handle,
175 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
176 void (*wakeup)(struct tipc_port *), const u32 importance);
177
178int tipc_reject_msg(struct sk_buff *buf, u32 err);
179
180int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
181
182void tipc_acknowledge(u32 port_ref, u32 ack);
183
184int tipc_createport(unsigned int tipc_user, void *usr_handle,
185 unsigned int importance, tipc_msg_err_event error_cb,
186 tipc_named_msg_err_event named_error_cb,
187 tipc_conn_shutdown_event conn_error_cb, tipc_msg_event msg_cb,
188 tipc_named_msg_event named_msg_cb,
189 tipc_conn_msg_event conn_msg_cb,
190 tipc_continue_event continue_event_cb, u32 *portref);
191
192int tipc_deleteport(u32 portref);
193
194int tipc_portimportance(u32 portref, unsigned int *importance);
195int tipc_set_portimportance(u32 portref, unsigned int importance);
196
197int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
198int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
199
200int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
201int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
202
203int tipc_publish(u32 portref, unsigned int scope,
204 struct tipc_name_seq const *name_seq);
205int tipc_withdraw(u32 portref, unsigned int scope,
206 struct tipc_name_seq const *name_seq);
207
208int tipc_connect2port(u32 portref, struct tipc_portid const *port);
209
210int tipc_disconnect(u32 portref);
211
212int tipc_shutdown(u32 ref);
213
214
215/*
216 * The following routines require that the port be locked on entry
217 */
218int tipc_disconnect_port(struct tipc_port *tp_ptr);
219
220/*
221 * TIPC messaging routines
222 */
223int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect);
224
225int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
226 unsigned int num_sect, struct iovec const *msg_sect);
227
228int tipc_send2port(u32 portref, struct tipc_portid const *dest,
229 unsigned int num_sect, struct iovec const *msg_sect);
230
231int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest,
232 struct sk_buff *buf, unsigned int dsz);
233
234int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
235 unsigned int section_count, struct iovec const *msg);
236
112int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr, 237int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
113 struct iovec const *msg_sect, u32 num_sect, 238 struct iovec const *msg_sect, u32 num_sect,
114 int err); 239 int err);
115struct sk_buff *tipc_port_get_ports(void); 240struct sk_buff *tipc_port_get_ports(void);
116struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space);
117void tipc_port_recv_proto_msg(struct sk_buff *buf); 241void tipc_port_recv_proto_msg(struct sk_buff *buf);
118void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp); 242void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
119void tipc_port_reinit(void); 243void tipc_port_reinit(void);
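With the prototypes above, port.h is now the single in-kernel declaration point for the native port API. A hedged sketch of a caller built only from these prototypes plus the tipc_attach()/tipc_detach() declarations added to user_reg.h further down; the importance constant, the {type, instance} name values, the lookup domain of 0 and the use of NULL for callbacks the caller does not need are illustrative assumptions, not something this patch prescribes:

/* assumes the in-kernel TIPC headers ("port.h", "user_reg.h", <linux/tipc.h>) */
static void example_named_msg_cb(void *usr_handle, u32 portref,
                                 struct sk_buff **buf, unsigned char const *data,
                                 unsigned int size, unsigned int importance,
                                 struct tipc_portid const *orig,
                                 struct tipc_name_seq const *dest)
{
        /* consume the incoming named message here */
}

static int example_send_hello(void)
{
        struct tipc_name name = { .type = 1000, .instance = 1 };  /* made up */
        char payload[] = "hello";
        struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
        u32 user_ref, portref;
        int err;

        err = tipc_attach(&user_ref);
        if (err)
                return err;

        err = tipc_createport(user_ref, NULL, TIPC_MEDIUM_IMPORTANCE,
                              NULL, NULL, NULL,                 /* err/shutdown callbacks */
                              NULL, example_named_msg_cb, NULL, /* msg callbacks */
                              NULL, &portref);                  /* continue cb, port ref */
        if (err)
                goto out_detach;

        err = tipc_send2name(portref, &name, 0 /* lookup domain */, 1, &iov);

        tipc_deleteport(portref);
out_detach:
        tipc_detach(user_ref);
        return err;
}

By contrast, tipc_createport_raw() hands back a struct tipc_port pointer directly (the structure documented above); the wrappers used in this sketch keep the caller on plain port references.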
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e9f0d500448..cd0bb77f267 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -49,10 +49,9 @@
49 49
50#include <linux/tipc.h> 50#include <linux/tipc.h>
51#include <linux/tipc_config.h> 51#include <linux/tipc_config.h>
52#include <net/tipc/tipc_msg.h>
53#include <net/tipc/tipc_port.h>
54 52
55#include "core.h" 53#include "core.h"
54#include "port.h"
56 55
57#define SS_LISTENING -1 /* socket is listening */ 56#define SS_LISTENING -1 /* socket is listening */
58#define SS_READY -2 /* socket is connectionless */ 57#define SS_READY -2 /* socket is connectionless */
@@ -404,7 +403,8 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
404 addr->addr.id.ref = tsock->peer_name.ref; 403 addr->addr.id.ref = tsock->peer_name.ref;
405 addr->addr.id.node = tsock->peer_name.node; 404 addr->addr.id.node = tsock->peer_name.node;
406 } else { 405 } else {
407 tipc_ownidentity(tsock->p->ref, &addr->addr.id); 406 addr->addr.id.ref = tsock->p->ref;
407 addr->addr.id.node = tipc_own_addr;
408 } 408 }
409 409
410 *uaddr_len = sizeof(*addr); 410 *uaddr_len = sizeof(*addr);
@@ -597,7 +597,6 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
597 break; 597 break;
598 res = tipc_multicast(tport->ref, 598 res = tipc_multicast(tport->ref,
599 &dest->addr.nameseq, 599 &dest->addr.nameseq,
600 0,
601 m->msg_iovlen, 600 m->msg_iovlen,
602 m->msg_iov); 601 m->msg_iov);
603 } 602 }
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 33313961d01..23f43d03980 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -35,10 +35,8 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "name_table.h" 38#include "name_table.h"
40#include "port.h" 39#include "user_reg.h"
41#include "ref.h"
42#include "subscr.h" 40#include "subscr.h"
43 41
44/** 42/**
@@ -544,14 +542,14 @@ static void subscr_named_msg_event(void *usr_handle,
544int tipc_subscr_start(void) 542int tipc_subscr_start(void)
545{ 543{
546 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV}; 544 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
547 int res = -1; 545 int res;
548 546
549 memset(&topsrv, 0, sizeof (topsrv)); 547 memset(&topsrv, 0, sizeof (topsrv));
550 spin_lock_init(&topsrv.lock); 548 spin_lock_init(&topsrv.lock);
551 INIT_LIST_HEAD(&topsrv.subscriber_list); 549 INIT_LIST_HEAD(&topsrv.subscriber_list);
552 550
553 spin_lock_bh(&topsrv.lock); 551 spin_lock_bh(&topsrv.lock);
554 res = tipc_attach(&topsrv.user_ref, NULL, NULL); 552 res = tipc_attach(&topsrv.user_ref);
555 if (res) { 553 if (res) {
556 spin_unlock_bh(&topsrv.lock); 554 spin_unlock_bh(&topsrv.lock);
557 return res; 555 return res;
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 50692880316..2e2702e2049 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -50,15 +50,11 @@
50/** 50/**
51 * struct tipc_user - registered TIPC user info 51 * struct tipc_user - registered TIPC user info
52 * @next: index of next free registry entry (or -1 for an allocated entry) 52 * @next: index of next free registry entry (or -1 for an allocated entry)
53 * @callback: ptr to routine to call when TIPC mode changes (NULL if none)
54 * @usr_handle: user-defined value passed to callback routine
55 * @ports: list of user ports owned by the user 53 * @ports: list of user ports owned by the user
56 */ 54 */
57 55
58struct tipc_user { 56struct tipc_user {
59 int next; 57 int next;
60 tipc_mode_event callback;
61 void *usr_handle;
62 struct list_head ports; 58 struct list_head ports;
63}; 59};
64 60
@@ -95,41 +91,12 @@ static int reg_init(void)
95} 91}
96 92
97/** 93/**
98 * reg_callback - inform TIPC user about current operating mode
99 */
100
101static void reg_callback(struct tipc_user *user_ptr)
102{
103 tipc_mode_event cb;
104 void *arg;
105
106 spin_lock_bh(&reg_lock);
107 cb = user_ptr->callback;
108 arg = user_ptr->usr_handle;
109 spin_unlock_bh(&reg_lock);
110
111 if (cb)
112 cb(arg, tipc_mode, tipc_own_addr);
113}
114
115/**
116 * tipc_reg_start - activate TIPC user registry 94 * tipc_reg_start - activate TIPC user registry
117 */ 95 */
118 96
119int tipc_reg_start(void) 97int tipc_reg_start(void)
120{ 98{
121 u32 u; 99 return reg_init();
122 int res;
123
124 if ((res = reg_init()))
125 return res;
126
127 for (u = 1; u <= MAX_USERID; u++) {
128 if (users[u].callback)
129 tipc_k_signal((Handler)reg_callback,
130 (unsigned long)&users[u]);
131 }
132 return 0;
133} 100}
134 101
135/** 102/**
@@ -138,15 +105,9 @@ int tipc_reg_start(void)
138 105
139void tipc_reg_stop(void) 106void tipc_reg_stop(void)
140{ 107{
141 int id;
142
143 if (!users) 108 if (!users)
144 return; 109 return;
145 110
146 for (id = 1; id <= MAX_USERID; id++) {
147 if (users[id].callback)
148 reg_callback(&users[id]);
149 }
150 kfree(users); 111 kfree(users);
151 users = NULL; 112 users = NULL;
152} 113}
@@ -157,12 +118,10 @@ void tipc_reg_stop(void)
157 * NOTE: This routine may be called when TIPC is inactive. 118 * NOTE: This routine may be called when TIPC is inactive.
158 */ 119 */
159 120
160int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle) 121int tipc_attach(u32 *userid)
161{ 122{
162 struct tipc_user *user_ptr; 123 struct tipc_user *user_ptr;
163 124
164 if ((tipc_mode == TIPC_NOT_RUNNING) && !cb)
165 return -ENOPROTOOPT;
166 if (!users) 125 if (!users)
167 reg_init(); 126 reg_init();
168 127
@@ -177,13 +136,9 @@ int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
177 user_ptr->next = -1; 136 user_ptr->next = -1;
178 spin_unlock_bh(&reg_lock); 137 spin_unlock_bh(&reg_lock);
179 138
180 user_ptr->callback = cb;
181 user_ptr->usr_handle = usr_handle;
182 INIT_LIST_HEAD(&user_ptr->ports); 139 INIT_LIST_HEAD(&user_ptr->ports);
183 atomic_inc(&tipc_user_count); 140 atomic_inc(&tipc_user_count);
184 141
185 if (cb && (tipc_mode != TIPC_NOT_RUNNING))
186 tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr);
187 return 0; 142 return 0;
188} 143}
189 144
@@ -207,7 +162,6 @@ void tipc_detach(u32 userid)
207 } 162 }
208 163
209 user_ptr = &users[userid]; 164 user_ptr = &users[userid];
210 user_ptr->callback = NULL;
211 INIT_LIST_HEAD(&ports_temp); 165 INIT_LIST_HEAD(&ports_temp);
212 list_splice(&user_ptr->ports, &ports_temp); 166 list_splice(&user_ptr->ports, &ports_temp);
213 user_ptr->next = next_free_user; 167 user_ptr->next = next_free_user;
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
index 81dc12e2882..109eed0d6de 100644
--- a/net/tipc/user_reg.h
+++ b/net/tipc/user_reg.h
@@ -42,6 +42,9 @@
42int tipc_reg_start(void); 42int tipc_reg_start(void);
43void tipc_reg_stop(void); 43void tipc_reg_stop(void);
44 44
45int tipc_attach(unsigned int *userref);
46void tipc_detach(unsigned int userref);
47
45int tipc_reg_add_port(struct user_port *up_ptr); 48int tipc_reg_add_port(struct user_port *up_ptr);
46int tipc_reg_remove_port(struct user_port *up_ptr); 49int tipc_reg_remove_port(struct user_port *up_ptr);
47 50
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 83f8b5e91fc..1b61ca8c48e 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -36,9 +36,6 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "zone.h" 38#include "zone.h"
39#include "net.h"
40#include "addr.h"
41#include "node_subscr.h"
42#include "cluster.h" 39#include "cluster.h"
43#include "node.h" 40#include "node.h"
44 41
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2268e679812..417d7a6c36c 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -316,7 +316,8 @@ static void unix_write_space(struct sock *sk)
316 if (unix_writable(sk)) { 316 if (unix_writable(sk)) {
317 wq = rcu_dereference(sk->sk_wq); 317 wq = rcu_dereference(sk->sk_wq);
318 if (wq_has_sleeper(wq)) 318 if (wq_has_sleeper(wq))
319 wake_up_interruptible_sync(&wq->wait); 319 wake_up_interruptible_sync_poll(&wq->wait,
320 POLLOUT | POLLWRNORM | POLLWRBAND);
320 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 321 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
321 } 322 }
322 rcu_read_unlock(); 323 rcu_read_unlock();
@@ -1736,7 +1737,8 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1736 goto out_unlock; 1737 goto out_unlock;
1737 } 1738 }
1738 1739
1739 wake_up_interruptible_sync(&u->peer_wait); 1740 wake_up_interruptible_sync_poll(&u->peer_wait,
1741 POLLOUT | POLLWRNORM | POLLWRBAND);
1740 1742
1741 if (msg->msg_name) 1743 if (msg->msg_name)
1742 unix_copy_addr(msg, skb->sk); 1744 unix_copy_addr(msg, skb->sk);
@@ -2099,13 +2101,12 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2099 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 2101 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2100 mask |= POLLERR; 2102 mask |= POLLERR;
2101 if (sk->sk_shutdown & RCV_SHUTDOWN) 2103 if (sk->sk_shutdown & RCV_SHUTDOWN)
2102 mask |= POLLRDHUP; 2104 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2103 if (sk->sk_shutdown == SHUTDOWN_MASK) 2105 if (sk->sk_shutdown == SHUTDOWN_MASK)
2104 mask |= POLLHUP; 2106 mask |= POLLHUP;
2105 2107
2106 /* readable? */ 2108 /* readable? */
2107 if (!skb_queue_empty(&sk->sk_receive_queue) || 2109 if (!skb_queue_empty(&sk->sk_receive_queue))
2108 (sk->sk_shutdown & RCV_SHUTDOWN))
2109 mask |= POLLIN | POLLRDNORM; 2110 mask |= POLLIN | POLLRDNORM;
2110 2111
2111 /* Connection-based need to check for termination and startup */ 2112 /* Connection-based need to check for termination and startup */
@@ -2117,20 +2118,19 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2117 return mask; 2118 return mask;
2118 } 2119 }
2119 2120
2120 /* writable? */ 2121 /* No write status requested, avoid expensive OUT tests. */
2121 writable = unix_writable(sk); 2122 if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
2122 if (writable) { 2123 return mask;
2123 other = unix_peer_get(sk);
2124 if (other) {
2125 if (unix_peer(other) != sk) {
2126 sock_poll_wait(file, &unix_sk(other)->peer_wait,
2127 wait);
2128 if (unix_recvq_full(other))
2129 writable = 0;
2130 }
2131 2124
2132 sock_put(other); 2125 writable = unix_writable(sk);
2126 other = unix_peer_get(sk);
2127 if (other) {
2128 if (unix_peer(other) != sk) {
2129 sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2130 if (unix_recvq_full(other))
2131 writable = 0;
2133 } 2132 }
2133 sock_put(other);
2134 } 2134 }
2135 2135
2136 if (writable) 2136 if (writable)
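The early return added to unix_dgram_poll() keys off wait->key so the costly peer-queue probe only runs when the poller actually asked for write events. The same guard fits any poll handler whose writability check is expensive; a sketch with placeholder cheap checks (assumes the usual socket headers, e.g. <net/sock.h> and <linux/poll.h>):

static unsigned int example_dgram_poll(struct file *file, struct socket *sock,
                                       poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask = 0;

        sock_poll_wait(file, sk_sleep(sk), wait);

        /* cheap state first: readability, errors, shutdown, ... */
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        /* no write status requested: skip the expensive OUT tests */
        if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
                return mask;

        /* expensive writability probe (peer queues, flow control, ...) */
        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}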
diff --git a/net/wanrouter/Makefile b/net/wanrouter/Makefile
index 9f188ab3dcd..4da14bc4807 100644
--- a/net/wanrouter/Makefile
+++ b/net/wanrouter/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_WAN_ROUTER) += wanrouter.o 5obj-$(CONFIG_WAN_ROUTER) += wanrouter.o
6 6
7wanrouter-objs := wanproc.o wanmain.o 7wanrouter-y := wanproc.o wanmain.o
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 9c21ebf9780..630bcf0a2f0 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -4,6 +4,8 @@
4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
7#include <linux/if.h> 9#include <linux/if.h>
8#include <linux/module.h> 10#include <linux/module.h>
9#include <linux/err.h> 11#include <linux/err.h>
@@ -216,8 +218,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
216 rdev->wiphy.debugfsdir, 218 rdev->wiphy.debugfsdir,
217 rdev->wiphy.debugfsdir->d_parent, 219 rdev->wiphy.debugfsdir->d_parent,
218 newname)) 220 newname))
219 printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n", 221 pr_err("failed to rename debugfs dir to %s!\n", newname);
220 newname);
221 222
222 nl80211_notify_dev_rename(rdev); 223 nl80211_notify_dev_rename(rdev);
223 224
@@ -699,8 +700,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
699 700
700 if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj, 701 if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
701 "phy80211")) { 702 "phy80211")) {
702 printk(KERN_ERR "wireless: failed to add phy80211 " 703 pr_err("failed to add phy80211 symlink to netdev!\n");
703 "symlink to netdev!\n");
704 } 704 }
705 wdev->netdev = dev; 705 wdev->netdev = dev;
706 wdev->sme_state = CFG80211_SME_IDLE; 706 wdev->sme_state = CFG80211_SME_IDLE;
diff --git a/net/wireless/lib80211.c b/net/wireless/lib80211.c
index 97d411f7450..3268fac5ab2 100644
--- a/net/wireless/lib80211.c
+++ b/net/wireless/lib80211.c
@@ -13,6 +13,8 @@
13 * 13 *
14 */ 14 */
15 15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
16#include <linux/module.h> 18#include <linux/module.h>
17#include <linux/ctype.h> 19#include <linux/ctype.h>
18#include <linux/ieee80211.h> 20#include <linux/ieee80211.h>
@@ -224,8 +226,8 @@ int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops)
224 return -EINVAL; 226 return -EINVAL;
225 227
226 found: 228 found:
227 printk(KERN_DEBUG "lib80211_crypt: unregistered algorithm " 229 printk(KERN_DEBUG "lib80211_crypt: unregistered algorithm '%s'\n",
228 "'%s'\n", ops->name); 230 ops->name);
229 list_del(&alg->list); 231 list_del(&alg->list);
230 spin_unlock_irqrestore(&lib80211_crypto_lock, flags); 232 spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
231 kfree(alg); 233 kfree(alg);
@@ -270,7 +272,7 @@ static struct lib80211_crypto_ops lib80211_crypt_null = {
270 272
271static int __init lib80211_init(void) 273static int __init lib80211_init(void)
272{ 274{
273 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION "\n"); 275 pr_info(DRV_DESCRIPTION "\n");
274 return lib80211_register_crypto_ops(&lib80211_crypt_null); 276 return lib80211_register_crypto_ops(&lib80211_crypt_null);
275} 277}
276 278
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 0fe40510e2c..7ea4f2b0770 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -10,6 +10,8 @@
10 * more details. 10 * more details.
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/err.h> 15#include <linux/err.h>
14#include <linux/module.h> 16#include <linux/module.h>
15#include <linux/init.h> 17#include <linux/init.h>
@@ -99,8 +101,7 @@ static void *lib80211_tkip_init(int key_idx)
99 priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, 101 priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
100 CRYPTO_ALG_ASYNC); 102 CRYPTO_ALG_ASYNC);
101 if (IS_ERR(priv->tx_tfm_arc4)) { 103 if (IS_ERR(priv->tx_tfm_arc4)) {
102 printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate " 104 printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
103 "crypto API arc4\n");
104 priv->tx_tfm_arc4 = NULL; 105 priv->tx_tfm_arc4 = NULL;
105 goto fail; 106 goto fail;
106 } 107 }
@@ -108,8 +109,7 @@ static void *lib80211_tkip_init(int key_idx)
108 priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0, 109 priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
109 CRYPTO_ALG_ASYNC); 110 CRYPTO_ALG_ASYNC);
110 if (IS_ERR(priv->tx_tfm_michael)) { 111 if (IS_ERR(priv->tx_tfm_michael)) {
111 printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate " 112 printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
112 "crypto API michael_mic\n");
113 priv->tx_tfm_michael = NULL; 113 priv->tx_tfm_michael = NULL;
114 goto fail; 114 goto fail;
115 } 115 }
@@ -117,8 +117,7 @@ static void *lib80211_tkip_init(int key_idx)
117 priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, 117 priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
118 CRYPTO_ALG_ASYNC); 118 CRYPTO_ALG_ASYNC);
119 if (IS_ERR(priv->rx_tfm_arc4)) { 119 if (IS_ERR(priv->rx_tfm_arc4)) {
120 printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate " 120 printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
121 "crypto API arc4\n");
122 priv->rx_tfm_arc4 = NULL; 121 priv->rx_tfm_arc4 = NULL;
123 goto fail; 122 goto fail;
124 } 123 }
@@ -126,8 +125,7 @@ static void *lib80211_tkip_init(int key_idx)
126 priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0, 125 priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
127 CRYPTO_ALG_ASYNC); 126 CRYPTO_ALG_ASYNC);
128 if (IS_ERR(priv->rx_tfm_michael)) { 127 if (IS_ERR(priv->rx_tfm_michael)) {
129 printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate " 128 printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
130 "crypto API michael_mic\n");
131 priv->rx_tfm_michael = NULL; 129 priv->rx_tfm_michael = NULL;
132 goto fail; 130 goto fail;
133 } 131 }
@@ -536,7 +534,7 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
536 struct scatterlist sg[2]; 534 struct scatterlist sg[2];
537 535
538 if (tfm_michael == NULL) { 536 if (tfm_michael == NULL) {
539 printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n"); 537 pr_warn("%s(): tfm_michael == NULL\n", __func__);
540 return -1; 538 return -1;
541 } 539 }
542 sg_init_table(sg, 2); 540 sg_init_table(sg, 2);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 26838d903b9..6980a0c315b 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -1028,3 +1028,15 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
1028 nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp); 1028 nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp);
1029} 1029}
1030EXPORT_SYMBOL(cfg80211_cqm_rssi_notify); 1030EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
1031
1032void cfg80211_cqm_pktloss_notify(struct net_device *dev,
1033 const u8 *peer, u32 num_packets, gfp_t gfp)
1034{
1035 struct wireless_dev *wdev = dev->ieee80211_ptr;
1036 struct wiphy *wiphy = wdev->wiphy;
1037 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
1038
1039 /* Indicate roaming trigger event to user space */
1040 nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp);
1041}
1042EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4e78e3f2679..67ff7e92cb9 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -166,7 +166,13 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
166 166
167 [NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 }, 167 [NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 },
168 [NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 }, 168 [NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 },
169
169 [NL80211_ATTR_FRAME_TYPE] = { .type = NLA_U16 }, 170 [NL80211_ATTR_FRAME_TYPE] = { .type = NLA_U16 },
171
172 [NL80211_ATTR_WIPHY_ANTENNA_TX] = { .type = NLA_U32 },
173 [NL80211_ATTR_WIPHY_ANTENNA_RX] = { .type = NLA_U32 },
174
175 [NL80211_ATTR_MCAST_RATE] = { .type = NLA_U32 },
170}; 176};
171 177
172/* policy for the key attributes */ 178/* policy for the key attributes */
@@ -526,7 +532,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
526 dev->wiphy.rts_threshold); 532 dev->wiphy.rts_threshold);
527 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, 533 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
528 dev->wiphy.coverage_class); 534 dev->wiphy.coverage_class);
529
530 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, 535 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
531 dev->wiphy.max_scan_ssids); 536 dev->wiphy.max_scan_ssids);
532 NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, 537 NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
@@ -545,6 +550,16 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
545 if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) 550 if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL)
546 NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE); 551 NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE);
547 552
553 if (dev->ops->get_antenna) {
554 u32 tx_ant = 0, rx_ant = 0;
555 int res;
556 res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant);
557 if (!res) {
558 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant);
559 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant);
560 }
561 }
562
548 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES); 563 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
549 if (!nl_modes) 564 if (!nl_modes)
550 goto nla_put_failure; 565 goto nla_put_failure;
@@ -1024,6 +1039,22 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1024 goto bad_res; 1039 goto bad_res;
1025 } 1040 }
1026 1041
1042 if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] &&
1043 info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]) {
1044 u32 tx_ant, rx_ant;
1045 if (!rdev->ops->set_antenna) {
1046 result = -EOPNOTSUPP;
1047 goto bad_res;
1048 }
1049
1050 tx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX]);
1051 rx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]);
1052
1053 result = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant);
1054 if (result)
1055 goto bad_res;
1056 }
1057
1027 changed = 0; 1058 changed = 0;
1028 1059
1029 if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) { 1060 if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) {
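On the driver side the two new attributes map onto a pair of cfg80211_ops callbacks with the signatures used above: get_antenna(&wiphy, &tx_ant, &rx_ant) and set_antenna(&wiphy, tx_ant, rx_ant), both returning 0 on success. A hedged sketch of a driver wiring them up; the fixed two-chain bitmap and the validation policy are illustrative assumptions, not part of this patch:

/* assumes <net/cfg80211.h> */
static int example_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
{
        *tx_ant = 0x3;          /* both chains available for TX ... */
        *rx_ant = 0x3;          /* ... and for RX */
        return 0;
}

static int example_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
{
        /* refuse selections outside the two chains this example exposes */
        if (!tx_ant || !rx_ant || (tx_ant & ~0x3) || (rx_ant & ~0x3))
                return -EINVAL;
        /* ... reprogram the radio's antenna selection here ... */
        return 0;
}

static const struct cfg80211_ops example_cfg_ops = {
        /* ... the driver's other callbacks ... */
        .get_antenna    = example_get_antenna,
        .set_antenna    = example_set_antenna,
};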
@@ -3569,6 +3600,34 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
3569 local_state_change); 3600 local_state_change);
3570} 3601}
3571 3602
3603static bool
3604nl80211_parse_mcast_rate(struct cfg80211_registered_device *rdev,
3605 int mcast_rate[IEEE80211_NUM_BANDS],
3606 int rateval)
3607{
3608 struct wiphy *wiphy = &rdev->wiphy;
3609 bool found = false;
3610 int band, i;
3611
3612 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
3613 struct ieee80211_supported_band *sband;
3614
3615 sband = wiphy->bands[band];
3616 if (!sband)
3617 continue;
3618
3619 for (i = 0; i < sband->n_bitrates; i++) {
3620 if (sband->bitrates[i].bitrate == rateval) {
3621 mcast_rate[band] = i + 1;
3622 found = true;
3623 break;
3624 }
3625 }
3626 }
3627
3628 return found;
3629}
3630
3572static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) 3631static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
3573{ 3632{
3574 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 3633 struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -3653,6 +3712,11 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
3653 } 3712 }
3654 } 3713 }
3655 3714
3715 if (info->attrs[NL80211_ATTR_MCAST_RATE] &&
3716 !nl80211_parse_mcast_rate(rdev, ibss.mcast_rate,
3717 nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
3718 return -EINVAL;
3719
3656 if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) { 3720 if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
3657 connkeys = nl80211_parse_connkeys(rdev, 3721 connkeys = nl80211_parse_connkeys(rdev,
3658 info->attrs[NL80211_ATTR_KEYS]); 3722 info->attrs[NL80211_ATTR_KEYS]);
@@ -5651,6 +5715,51 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
5651 nlmsg_free(msg); 5715 nlmsg_free(msg);
5652} 5716}
5653 5717
5718void
5719nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
5720 struct net_device *netdev, const u8 *peer,
5721 u32 num_packets, gfp_t gfp)
5722{
5723 struct sk_buff *msg;
5724 struct nlattr *pinfoattr;
5725 void *hdr;
5726
5727 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
5728 if (!msg)
5729 return;
5730
5731 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
5732 if (!hdr) {
5733 nlmsg_free(msg);
5734 return;
5735 }
5736
5737 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
5738 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
5739 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, peer);
5740
5741 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
5742 if (!pinfoattr)
5743 goto nla_put_failure;
5744
5745 NLA_PUT_U32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets);
5746
5747 nla_nest_end(msg, pinfoattr);
5748
5749 if (genlmsg_end(msg, hdr) < 0) {
5750 nlmsg_free(msg);
5751 return;
5752 }
5753
5754 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
5755 nl80211_mlme_mcgrp.id, gfp);
5756 return;
5757
5758 nla_put_failure:
5759 genlmsg_cancel(msg, hdr);
5760 nlmsg_free(msg);
5761}
5762
5654static int nl80211_netlink_notify(struct notifier_block * nb, 5763static int nl80211_netlink_notify(struct notifier_block * nb,
5655 unsigned long state, 5764 unsigned long state,
5656 void *_notify) 5765 void *_notify)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 30d2f939150..16c2f719076 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -87,5 +87,9 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
87 struct net_device *netdev, 87 struct net_device *netdev,
88 enum nl80211_cqm_rssi_threshold_event rssi_event, 88 enum nl80211_cqm_rssi_threshold_event rssi_event,
89 gfp_t gfp); 89 gfp_t gfp);
90void
91nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
92 struct net_device *netdev, const u8 *peer,
93 u32 num_packets, gfp_t gfp);
90 94
91#endif /* __NET_WIRELESS_NL80211_H */ 95#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4b9f8912526..5ed615f94e0 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -32,6 +32,9 @@
32 * rely on some SHA1 checksum of the regdomain for example. 32 * rely on some SHA1 checksum of the regdomain for example.
33 * 33 *
34 */ 34 */
35
36#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37
35#include <linux/kernel.h> 38#include <linux/kernel.h>
36#include <linux/slab.h> 39#include <linux/slab.h>
37#include <linux/list.h> 40#include <linux/list.h>
@@ -48,7 +51,7 @@
48#ifdef CONFIG_CFG80211_REG_DEBUG 51#ifdef CONFIG_CFG80211_REG_DEBUG
49#define REG_DBG_PRINT(format, args...) \ 52#define REG_DBG_PRINT(format, args...) \
50 do { \ 53 do { \
51 printk(KERN_DEBUG format , ## args); \ 54 printk(KERN_DEBUG pr_fmt(format), ##args); \
52 } while (0) 55 } while (0)
53#else 56#else
54#define REG_DBG_PRINT(args...) 57#define REG_DBG_PRINT(args...)
@@ -96,6 +99,9 @@ struct reg_beacon {
96 struct ieee80211_channel chan; 99 struct ieee80211_channel chan;
97}; 100};
98 101
102static void reg_todo(struct work_struct *work);
103static DECLARE_WORK(reg_work, reg_todo);
104
99/* We keep a static world regulatory domain in case of the absence of CRDA */ 105/* We keep a static world regulatory domain in case of the absence of CRDA */
100static const struct ieee80211_regdomain world_regdom = { 106static const struct ieee80211_regdomain world_regdom = {
101 .n_reg_rules = 5, 107 .n_reg_rules = 5,
@@ -367,11 +373,10 @@ static int call_crda(const char *alpha2)
367 }; 373 };
368 374
369 if (!is_world_regdom((char *) alpha2)) 375 if (!is_world_regdom((char *) alpha2))
370 printk(KERN_INFO "cfg80211: Calling CRDA for country: %c%c\n", 376 pr_info("Calling CRDA for country: %c%c\n",
371 alpha2[0], alpha2[1]); 377 alpha2[0], alpha2[1]);
372 else 378 else
373 printk(KERN_INFO "cfg80211: Calling CRDA to update world " 379 pr_info("Calling CRDA to update world regulatory domain\n");
374 "regulatory domain\n");
375 380
376 /* query internal regulatory database (if it exists) */ 381 /* query internal regulatory database (if it exists) */
377 reg_regdb_query(alpha2); 382 reg_regdb_query(alpha2);
@@ -711,6 +716,60 @@ int freq_reg_info(struct wiphy *wiphy,
711} 716}
712EXPORT_SYMBOL(freq_reg_info); 717EXPORT_SYMBOL(freq_reg_info);
713 718
719#ifdef CONFIG_CFG80211_REG_DEBUG
720static const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
721{
722 switch (initiator) {
723 case NL80211_REGDOM_SET_BY_CORE:
724 return "Set by core";
725 case NL80211_REGDOM_SET_BY_USER:
726 return "Set by user";
727 case NL80211_REGDOM_SET_BY_DRIVER:
728 return "Set by driver";
729 case NL80211_REGDOM_SET_BY_COUNTRY_IE:
730 return "Set by country IE";
731 default:
732 WARN_ON(1);
733 return "Set by bug";
734 }
735}
736
737static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
738 u32 desired_bw_khz,
739 const struct ieee80211_reg_rule *reg_rule)
740{
741 const struct ieee80211_power_rule *power_rule;
742 const struct ieee80211_freq_range *freq_range;
743 char max_antenna_gain[32];
744
745 power_rule = &reg_rule->power_rule;
746 freq_range = &reg_rule->freq_range;
747
748 if (!power_rule->max_antenna_gain)
749 snprintf(max_antenna_gain, 32, "N/A");
750 else
751 snprintf(max_antenna_gain, 32, "%d", power_rule->max_antenna_gain);
752
753 REG_DBG_PRINT("Updating information on frequency %d MHz "
754 "for %d a MHz width channel with regulatory rule:\n",
755 chan->center_freq,
756 KHZ_TO_MHZ(desired_bw_khz));
757
758 REG_DBG_PRINT("%d KHz - %d KHz @ KHz), (%s mBi, %d mBm)\n",
759 freq_range->start_freq_khz,
760 freq_range->end_freq_khz,
761 max_antenna_gain,
762 power_rule->max_eirp);
763}
764#else
765static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
766 u32 desired_bw_khz,
767 const struct ieee80211_reg_rule *reg_rule)
768{
769 return;
770}
771#endif
772
714/* 773/*
715 * Note that right now we assume the desired channel bandwidth 774 * Note that right now we assume the desired channel bandwidth
716 * is always 20 MHz for each individual channel (HT40 uses 20 MHz 775 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
@@ -720,7 +779,9 @@ EXPORT_SYMBOL(freq_reg_info);
720 * on the wiphy with the target_bw specified. Then we can simply use 779 * on the wiphy with the target_bw specified. Then we can simply use
721 * that below for the desired_bw_khz below. 780 * that below for the desired_bw_khz below.
722 */ 781 */
723static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, 782static void handle_channel(struct wiphy *wiphy,
783 enum nl80211_reg_initiator initiator,
784 enum ieee80211_band band,
724 unsigned int chan_idx) 785 unsigned int chan_idx)
725{ 786{
726 int r; 787 int r;
@@ -748,8 +809,27 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
748 desired_bw_khz, 809 desired_bw_khz,
749 &reg_rule); 810 &reg_rule);
750 811
751 if (r) 812 if (r) {
813 /*
814 * We will disable all channels that do not match our
815 * received regulatory rule unless the hint is coming
816 * from a Country IE and the Country IE had no information
817 * about a band. The IEEE 802.11 spec allows for an AP
818 * to send only a subset of the regulatory rules allowed,
819 * so an AP in the US that only supports 2.4 GHz may only send
820 * a country IE with information for the 2.4 GHz band
821 * while 5 GHz is still supported.
822 */
823 if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
824 r == -ERANGE)
825 return;
826
827 REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
828 chan->flags = IEEE80211_CHAN_DISABLED;
752 return; 829 return;
830 }
831
832 chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);
753 833
754 power_rule = &reg_rule->power_rule; 834 power_rule = &reg_rule->power_rule;
755 freq_range = &reg_rule->freq_range; 835 freq_range = &reg_rule->freq_range;
@@ -784,7 +864,9 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
784 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); 864 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
785} 865}
786 866
787static void handle_band(struct wiphy *wiphy, enum ieee80211_band band) 867static void handle_band(struct wiphy *wiphy,
868 enum ieee80211_band band,
869 enum nl80211_reg_initiator initiator)
788{ 870{
789 unsigned int i; 871 unsigned int i;
790 struct ieee80211_supported_band *sband; 872 struct ieee80211_supported_band *sband;
@@ -793,24 +875,42 @@ static void handle_band(struct wiphy *wiphy, enum ieee80211_band band)
793 sband = wiphy->bands[band]; 875 sband = wiphy->bands[band];
794 876
795 for (i = 0; i < sband->n_channels; i++) 877 for (i = 0; i < sband->n_channels; i++)
796 handle_channel(wiphy, band, i); 878 handle_channel(wiphy, initiator, band, i);
797} 879}
798 880
799static bool ignore_reg_update(struct wiphy *wiphy, 881static bool ignore_reg_update(struct wiphy *wiphy,
800 enum nl80211_reg_initiator initiator) 882 enum nl80211_reg_initiator initiator)
801{ 883{
802 if (!last_request) 884 if (!last_request) {
885 REG_DBG_PRINT("Ignoring regulatory request %s since "
886 "last_request is not set\n",
887 reg_initiator_name(initiator));
803 return true; 888 return true;
889 }
890
804 if (initiator == NL80211_REGDOM_SET_BY_CORE && 891 if (initiator == NL80211_REGDOM_SET_BY_CORE &&
805 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) 892 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
893 REG_DBG_PRINT("Ignoring regulatory request %s "
894 "since the driver uses its own custom "
895 "regulatory domain ",
896 reg_initiator_name(initiator));
806 return true; 897 return true;
898 }
899
807 /* 900 /*
808 * wiphy->regd will be set once the device has its own 901 * wiphy->regd will be set once the device has its own
809 * desired regulatory domain set 902 * desired regulatory domain set
810 */ 903 */
811 if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd && 904 if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
812 !is_world_regdom(last_request->alpha2)) 905 initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
906 !is_world_regdom(last_request->alpha2)) {
907 REG_DBG_PRINT("Ignoring regulatory request %s "
908 "since the driver requires its own regulaotry "
909 "domain to be set first",
910 reg_initiator_name(initiator));
813 return true; 911 return true;
912 }
913
814 return false; 914 return false;
815} 915}
816 916
@@ -1030,7 +1130,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
1030 goto out; 1130 goto out;
1031 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1131 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1032 if (wiphy->bands[band]) 1132 if (wiphy->bands[band])
1033 handle_band(wiphy, band); 1133 handle_band(wiphy, band, initiator);
1034 } 1134 }
1035out: 1135out:
1036 reg_process_beacons(wiphy); 1136 reg_process_beacons(wiphy);
@@ -1066,10 +1166,17 @@ static void handle_channel_custom(struct wiphy *wiphy,
1066 regd); 1166 regd);
1067 1167
1068 if (r) { 1168 if (r) {
1169 REG_DBG_PRINT("Disabling freq %d MHz as custom "
1170 "regd has no rule that fits a %d MHz "
1171 "wide channel\n",
1172 chan->center_freq,
1173 KHZ_TO_MHZ(desired_bw_khz));
1069 chan->flags = IEEE80211_CHAN_DISABLED; 1174 chan->flags = IEEE80211_CHAN_DISABLED;
1070 return; 1175 return;
1071 } 1176 }
1072 1177
1178 chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);
1179
1073 power_rule = &reg_rule->power_rule; 1180 power_rule = &reg_rule->power_rule;
1074 freq_range = &reg_rule->freq_range; 1181 freq_range = &reg_rule->freq_range;
1075 1182
@@ -1215,6 +1322,21 @@ static int ignore_request(struct wiphy *wiphy,
1215 return -EINVAL; 1322 return -EINVAL;
1216} 1323}
1217 1324
1325static void reg_set_request_processed(void)
1326{
1327 bool need_more_processing = false;
1328
1329 last_request->processed = true;
1330
1331 spin_lock(&reg_requests_lock);
1332 if (!list_empty(&reg_requests_list))
1333 need_more_processing = true;
1334 spin_unlock(&reg_requests_lock);
1335
1336 if (need_more_processing)
1337 schedule_work(&reg_work);
1338}
1339
1218/** 1340/**
1219 * __regulatory_hint - hint to the wireless core a regulatory domain 1341 * __regulatory_hint - hint to the wireless core a regulatory domain
1220 * @wiphy: if the hint comes from country information from an AP, this 1342 * @wiphy: if the hint comes from country information from an AP, this
@@ -1290,8 +1412,10 @@ new_request:
1290 * have applied the requested regulatory domain before we just 1412 * have applied the requested regulatory domain before we just
1291 * inform userspace we have processed the request 1413 * inform userspace we have processed the request
1292 */ 1414 */
1293 if (r == -EALREADY) 1415 if (r == -EALREADY) {
1294 nl80211_send_reg_change_event(last_request); 1416 nl80211_send_reg_change_event(last_request);
1417 reg_set_request_processed();
1418 }
1295 return r; 1419 return r;
1296 } 1420 }
1297 1421
@@ -1307,16 +1431,13 @@ static void reg_process_hint(struct regulatory_request *reg_request)
1307 1431
1308 BUG_ON(!reg_request->alpha2); 1432 BUG_ON(!reg_request->alpha2);
1309 1433
1310 mutex_lock(&cfg80211_mutex);
1311 mutex_lock(&reg_mutex);
1312
1313 if (wiphy_idx_valid(reg_request->wiphy_idx)) 1434 if (wiphy_idx_valid(reg_request->wiphy_idx))
1314 wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx); 1435 wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
1315 1436
1316 if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && 1437 if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
1317 !wiphy) { 1438 !wiphy) {
1318 kfree(reg_request); 1439 kfree(reg_request);
1319 goto out; 1440 return;
1320 } 1441 }
1321 1442
1322 r = __regulatory_hint(wiphy, reg_request); 1443 r = __regulatory_hint(wiphy, reg_request);
@@ -1324,28 +1445,46 @@ static void reg_process_hint(struct regulatory_request *reg_request)
1324 if (r == -EALREADY && wiphy && 1445 if (r == -EALREADY && wiphy &&
1325 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) 1446 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
1326 wiphy_update_regulatory(wiphy, initiator); 1447 wiphy_update_regulatory(wiphy, initiator);
1327out:
1328 mutex_unlock(&reg_mutex);
1329 mutex_unlock(&cfg80211_mutex);
1330} 1448}
1331 1449
1332/* Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* */ 1450/*
1451 * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
 1452 * Regulatory hints come on a first-come, first-served basis and we
1453 * must process each one atomically.
1454 */
1333static void reg_process_pending_hints(void) 1455static void reg_process_pending_hints(void)
1334 { 1456{
1335 struct regulatory_request *reg_request; 1457 struct regulatory_request *reg_request;
1336 1458
1459 mutex_lock(&cfg80211_mutex);
1460 mutex_lock(&reg_mutex);
1461
1462 /* When last_request->processed becomes true this will be rescheduled */
1463 if (last_request && !last_request->processed) {
1464 REG_DBG_PRINT("Pending regulatory request, waiting "
 1465			"for it to be processed...\n");
1466 goto out;
1467 }
1468
1337 spin_lock(&reg_requests_lock); 1469 spin_lock(&reg_requests_lock);
1338 while (!list_empty(&reg_requests_list)) {
1339 reg_request = list_first_entry(&reg_requests_list,
1340 struct regulatory_request,
1341 list);
1342 list_del_init(&reg_request->list);
1343 1470
1471 if (list_empty(&reg_requests_list)) {
1344 spin_unlock(&reg_requests_lock); 1472 spin_unlock(&reg_requests_lock);
1345 reg_process_hint(reg_request); 1473 goto out;
1346 spin_lock(&reg_requests_lock);
1347 } 1474 }
1475
1476 reg_request = list_first_entry(&reg_requests_list,
1477 struct regulatory_request,
1478 list);
1479 list_del_init(&reg_request->list);
1480
1348 spin_unlock(&reg_requests_lock); 1481 spin_unlock(&reg_requests_lock);
1482
1483 reg_process_hint(reg_request);
1484
1485out:
1486 mutex_unlock(&reg_mutex);
1487 mutex_unlock(&cfg80211_mutex);
1349} 1488}
1350 1489
1351/* Processes beacon hints -- this has nothing to do with country IEs */ 1490/* Processes beacon hints -- this has nothing to do with country IEs */
@@ -1392,8 +1531,6 @@ static void reg_todo(struct work_struct *work)
1392 reg_process_pending_beacon_hints(); 1531 reg_process_pending_beacon_hints();
1393} 1532}
1394 1533
1395static DECLARE_WORK(reg_work, reg_todo);
1396
1397static void queue_regulatory_request(struct regulatory_request *request) 1534static void queue_regulatory_request(struct regulatory_request *request)
1398{ 1535{
1399 if (isalpha(request->alpha2[0])) 1536 if (isalpha(request->alpha2[0]))
@@ -1428,12 +1565,7 @@ static int regulatory_hint_core(const char *alpha2)
1428 request->alpha2[1] = alpha2[1]; 1565 request->alpha2[1] = alpha2[1];
1429 request->initiator = NL80211_REGDOM_SET_BY_CORE; 1566 request->initiator = NL80211_REGDOM_SET_BY_CORE;
1430 1567
1431 /* 1568 queue_regulatory_request(request);
1432 * This ensures last_request is populated once modules
1433 * come swinging in and calling regulatory hints and
1434 * wiphy_apply_custom_regulatory().
1435 */
1436 reg_process_hint(request);
1437 1569
1438 return 0; 1570 return 0;
1439} 1571}
@@ -1559,7 +1691,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
1559 if (is_user_regdom_saved()) { 1691 if (is_user_regdom_saved()) {
1560 /* Unless we're asked to ignore it and reset it */ 1692 /* Unless we're asked to ignore it and reset it */
1561 if (reset_user) { 1693 if (reset_user) {
1562 REG_DBG_PRINT("cfg80211: Restoring regulatory settings " 1694 REG_DBG_PRINT("Restoring regulatory settings "
1563 "including user preference\n"); 1695 "including user preference\n");
1564 user_alpha2[0] = '9'; 1696 user_alpha2[0] = '9';
1565 user_alpha2[1] = '7'; 1697 user_alpha2[1] = '7';
@@ -1570,7 +1702,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
1570 * back as they were for a full restore. 1702 * back as they were for a full restore.
1571 */ 1703 */
1572 if (!is_world_regdom(ieee80211_regdom)) { 1704 if (!is_world_regdom(ieee80211_regdom)) {
1573 REG_DBG_PRINT("cfg80211: Keeping preference on " 1705 REG_DBG_PRINT("Keeping preference on "
1574 "module parameter ieee80211_regdom: %c%c\n", 1706 "module parameter ieee80211_regdom: %c%c\n",
1575 ieee80211_regdom[0], 1707 ieee80211_regdom[0],
1576 ieee80211_regdom[1]); 1708 ieee80211_regdom[1]);
@@ -1578,7 +1710,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
1578 alpha2[1] = ieee80211_regdom[1]; 1710 alpha2[1] = ieee80211_regdom[1];
1579 } 1711 }
1580 } else { 1712 } else {
1581 REG_DBG_PRINT("cfg80211: Restoring regulatory settings " 1713 REG_DBG_PRINT("Restoring regulatory settings "
1582 "while preserving user preference for: %c%c\n", 1714 "while preserving user preference for: %c%c\n",
1583 user_alpha2[0], 1715 user_alpha2[0],
1584 user_alpha2[1]); 1716 user_alpha2[1]);
@@ -1586,14 +1718,14 @@ static void restore_alpha2(char *alpha2, bool reset_user)
1586 alpha2[1] = user_alpha2[1]; 1718 alpha2[1] = user_alpha2[1];
1587 } 1719 }
1588 } else if (!is_world_regdom(ieee80211_regdom)) { 1720 } else if (!is_world_regdom(ieee80211_regdom)) {
1589 REG_DBG_PRINT("cfg80211: Keeping preference on " 1721 REG_DBG_PRINT("Keeping preference on "
1590 "module parameter ieee80211_regdom: %c%c\n", 1722 "module parameter ieee80211_regdom: %c%c\n",
1591 ieee80211_regdom[0], 1723 ieee80211_regdom[0],
1592 ieee80211_regdom[1]); 1724 ieee80211_regdom[1]);
1593 alpha2[0] = ieee80211_regdom[0]; 1725 alpha2[0] = ieee80211_regdom[0];
1594 alpha2[1] = ieee80211_regdom[1]; 1726 alpha2[1] = ieee80211_regdom[1];
1595 } else 1727 } else
1596 REG_DBG_PRINT("cfg80211: Restoring regulatory settings\n"); 1728 REG_DBG_PRINT("Restoring regulatory settings\n");
1597} 1729}
1598 1730
1599/* 1731/*
@@ -1661,7 +1793,7 @@ static void restore_regulatory_settings(bool reset_user)
1661 1793
1662void regulatory_hint_disconnect(void) 1794void regulatory_hint_disconnect(void)
1663{ 1795{
1664 REG_DBG_PRINT("cfg80211: All devices are disconnected, going to " 1796 REG_DBG_PRINT("All devices are disconnected, going to "
1665 "restore regulatory settings\n"); 1797 "restore regulatory settings\n");
1666 restore_regulatory_settings(false); 1798 restore_regulatory_settings(false);
1667} 1799}
@@ -1691,7 +1823,7 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
1691 if (!reg_beacon) 1823 if (!reg_beacon)
1692 return -ENOMEM; 1824 return -ENOMEM;
1693 1825
1694 REG_DBG_PRINT("cfg80211: Found new beacon on " 1826 REG_DBG_PRINT("Found new beacon on "
1695 "frequency: %d MHz (Ch %d) on %s\n", 1827 "frequency: %d MHz (Ch %d) on %s\n",
1696 beacon_chan->center_freq, 1828 beacon_chan->center_freq,
1697 ieee80211_frequency_to_channel(beacon_chan->center_freq), 1829 ieee80211_frequency_to_channel(beacon_chan->center_freq),
@@ -1721,8 +1853,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
1721 const struct ieee80211_freq_range *freq_range = NULL; 1853 const struct ieee80211_freq_range *freq_range = NULL;
1722 const struct ieee80211_power_rule *power_rule = NULL; 1854 const struct ieee80211_power_rule *power_rule = NULL;
1723 1855
1724 printk(KERN_INFO " (start_freq - end_freq @ bandwidth), " 1856 pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");
1725 "(max_antenna_gain, max_eirp)\n");
1726 1857
1727 for (i = 0; i < rd->n_reg_rules; i++) { 1858 for (i = 0; i < rd->n_reg_rules; i++) {
1728 reg_rule = &rd->reg_rules[i]; 1859 reg_rule = &rd->reg_rules[i];
@@ -1734,16 +1865,14 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
1734 * in certain regions 1865 * in certain regions
1735 */ 1866 */
1736 if (power_rule->max_antenna_gain) 1867 if (power_rule->max_antenna_gain)
1737 printk(KERN_INFO " (%d KHz - %d KHz @ %d KHz), " 1868 pr_info(" (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
1738 "(%d mBi, %d mBm)\n",
1739 freq_range->start_freq_khz, 1869 freq_range->start_freq_khz,
1740 freq_range->end_freq_khz, 1870 freq_range->end_freq_khz,
1741 freq_range->max_bandwidth_khz, 1871 freq_range->max_bandwidth_khz,
1742 power_rule->max_antenna_gain, 1872 power_rule->max_antenna_gain,
1743 power_rule->max_eirp); 1873 power_rule->max_eirp);
1744 else 1874 else
1745 printk(KERN_INFO " (%d KHz - %d KHz @ %d KHz), " 1875 pr_info(" (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
1746 "(N/A, %d mBm)\n",
1747 freq_range->start_freq_khz, 1876 freq_range->start_freq_khz,
1748 freq_range->end_freq_khz, 1877 freq_range->end_freq_khz,
1749 freq_range->max_bandwidth_khz, 1878 freq_range->max_bandwidth_khz,
@@ -1762,27 +1891,20 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
1762 rdev = cfg80211_rdev_by_wiphy_idx( 1891 rdev = cfg80211_rdev_by_wiphy_idx(
1763 last_request->wiphy_idx); 1892 last_request->wiphy_idx);
1764 if (rdev) { 1893 if (rdev) {
1765 printk(KERN_INFO "cfg80211: Current regulatory " 1894 pr_info("Current regulatory domain updated by AP to: %c%c\n",
1766 "domain updated by AP to: %c%c\n",
1767 rdev->country_ie_alpha2[0], 1895 rdev->country_ie_alpha2[0],
1768 rdev->country_ie_alpha2[1]); 1896 rdev->country_ie_alpha2[1]);
1769 } else 1897 } else
1770 printk(KERN_INFO "cfg80211: Current regulatory " 1898 pr_info("Current regulatory domain intersected:\n");
1771 "domain intersected:\n");
1772 } else 1899 } else
1773 printk(KERN_INFO "cfg80211: Current regulatory " 1900 pr_info("Current regulatory domain intersected:\n");
1774 "domain intersected:\n");
1775 } else if (is_world_regdom(rd->alpha2)) 1901 } else if (is_world_regdom(rd->alpha2))
1776 printk(KERN_INFO "cfg80211: World regulatory " 1902 pr_info("World regulatory domain updated:\n");
1777 "domain updated:\n");
1778 else { 1903 else {
1779 if (is_unknown_alpha2(rd->alpha2)) 1904 if (is_unknown_alpha2(rd->alpha2))
1780 printk(KERN_INFO "cfg80211: Regulatory domain " 1905 pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n");
1781 "changed to driver built-in settings "
1782 "(unknown country)\n");
1783 else 1906 else
1784 printk(KERN_INFO "cfg80211: Regulatory domain " 1907 pr_info("Regulatory domain changed to country: %c%c\n",
1785 "changed to country: %c%c\n",
1786 rd->alpha2[0], rd->alpha2[1]); 1908 rd->alpha2[0], rd->alpha2[1]);
1787 } 1909 }
1788 print_rd_rules(rd); 1910 print_rd_rules(rd);
@@ -1790,8 +1912,7 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
1790 1912
1791static void print_regdomain_info(const struct ieee80211_regdomain *rd) 1913static void print_regdomain_info(const struct ieee80211_regdomain *rd)
1792{ 1914{
1793 printk(KERN_INFO "cfg80211: Regulatory domain: %c%c\n", 1915 pr_info("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]);
1794 rd->alpha2[0], rd->alpha2[1]);
1795 print_rd_rules(rd); 1916 print_rd_rules(rd);
1796} 1917}
1797 1918
@@ -1842,8 +1963,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
1842 return -EINVAL; 1963 return -EINVAL;
1843 1964
1844 if (!is_valid_rd(rd)) { 1965 if (!is_valid_rd(rd)) {
1845 printk(KERN_ERR "cfg80211: Invalid " 1966 pr_err("Invalid regulatory domain detected:\n");
1846 "regulatory domain detected:\n");
1847 print_regdomain_info(rd); 1967 print_regdomain_info(rd);
1848 return -EINVAL; 1968 return -EINVAL;
1849 } 1969 }
@@ -1959,6 +2079,8 @@ int set_regdom(const struct ieee80211_regdomain *rd)
1959 2079
1960 nl80211_send_reg_change_event(last_request); 2080 nl80211_send_reg_change_event(last_request);
1961 2081
2082 reg_set_request_processed();
2083
1962 mutex_unlock(&reg_mutex); 2084 mutex_unlock(&reg_mutex);
1963 2085
1964 return r; 2086 return r;
@@ -2015,8 +2137,7 @@ int __init regulatory_init(void)
2015 * early boot for call_usermodehelper(). For now treat these 2137 * early boot for call_usermodehelper(). For now treat these
2016 * errors as non-fatal. 2138 * errors as non-fatal.
2017 */ 2139 */
2018 printk(KERN_ERR "cfg80211: kobject_uevent_env() was unable " 2140 pr_err("kobject_uevent_env() was unable to call CRDA during init\n");
2019 "to call CRDA during init");
2020#ifdef CONFIG_CFG80211_REG_DEBUG 2141#ifdef CONFIG_CFG80211_REG_DEBUG
2021 /* We want to find out exactly why when debugging */ 2142 /* We want to find out exactly why when debugging */
2022 WARN_ON(err); 2143 WARN_ON(err);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 76120aeda57..fee020b15a4 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -502,7 +502,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
502 skb_orphan(skb); 502 skb_orphan(skb);
503 503
504 if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) { 504 if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) {
505 printk(KERN_ERR "failed to reallocate Tx buffer\n"); 505 pr_err("failed to reallocate Tx buffer\n");
506 return -ENOMEM; 506 return -ENOMEM;
507 } 507 }
508 skb->truesize += head_need; 508 skb->truesize += head_need;
@@ -685,20 +685,17 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
685 continue; 685 continue;
686 if (rdev->ops->add_key(wdev->wiphy, dev, i, false, NULL, 686 if (rdev->ops->add_key(wdev->wiphy, dev, i, false, NULL,
687 &wdev->connect_keys->params[i])) { 687 &wdev->connect_keys->params[i])) {
688 printk(KERN_ERR "%s: failed to set key %d\n", 688 netdev_err(dev, "failed to set key %d\n", i);
689 dev->name, i);
690 continue; 689 continue;
691 } 690 }
692 if (wdev->connect_keys->def == i) 691 if (wdev->connect_keys->def == i)
693 if (rdev->ops->set_default_key(wdev->wiphy, dev, i)) { 692 if (rdev->ops->set_default_key(wdev->wiphy, dev, i)) {
694 printk(KERN_ERR "%s: failed to set defkey %d\n", 693 netdev_err(dev, "failed to set defkey %d\n", i);
695 dev->name, i);
696 continue; 694 continue;
697 } 695 }
698 if (wdev->connect_keys->defmgmt == i) 696 if (wdev->connect_keys->defmgmt == i)
699 if (rdev->ops->set_default_mgmt_key(wdev->wiphy, dev, i)) 697 if (rdev->ops->set_default_mgmt_key(wdev->wiphy, dev, i))
700 printk(KERN_ERR "%s: failed to set mgtdef %d\n", 698 netdev_err(dev, "failed to set mgtdef %d\n", i);
701 dev->name, i);
702 } 699 }
703 700
704 kfree(wdev->connect_keys); 701 kfree(wdev->connect_keys);
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index dc675a3daa3..fdbc23c10d8 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -467,8 +467,8 @@ void wireless_send_event(struct net_device * dev,
467 * The best the driver could do is to log an error message. 467 * The best the driver could do is to log an error message.
468 * We will do it ourselves instead... 468 * We will do it ourselves instead...
469 */ 469 */
470 printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n", 470 netdev_err(dev, "(WE) : Invalid/Unknown Wireless Event (0x%04X)\n",
471 dev->name, cmd); 471 cmd);
472 return; 472 return;
473 } 473 }
474 474
@@ -476,11 +476,13 @@ void wireless_send_event(struct net_device * dev,
476 if (descr->header_type == IW_HEADER_TYPE_POINT) { 476 if (descr->header_type == IW_HEADER_TYPE_POINT) {
477 /* Check if number of token fits within bounds */ 477 /* Check if number of token fits within bounds */
478 if (wrqu->data.length > descr->max_tokens) { 478 if (wrqu->data.length > descr->max_tokens) {
479 printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length); 479 netdev_err(dev, "(WE) : Wireless Event too big (%d)\n",
480 wrqu->data.length);
480 return; 481 return;
481 } 482 }
482 if (wrqu->data.length < descr->min_tokens) { 483 if (wrqu->data.length < descr->min_tokens) {
483 printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length); 484 netdev_err(dev, "(WE) : Wireless Event too small (%d)\n",
485 wrqu->data.length);
484 return; 486 return;
485 } 487 }
486 /* Calculate extra_len - extra is NULL for restricted events */ 488 /* Calculate extra_len - extra is NULL for restricted events */
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index f7af98dff40..ad96ee90fe2 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1357,11 +1357,11 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1357 void __user *argp = (void __user *)arg; 1357 void __user *argp = (void __user *)arg;
1358 int rc; 1358 int rc;
1359 1359
1360 lock_kernel();
1361 switch (cmd) { 1360 switch (cmd) {
1362 case TIOCOUTQ: { 1361 case TIOCOUTQ: {
1363 int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 1362 int amount;
1364 1363
1364 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1365 if (amount < 0) 1365 if (amount < 0)
1366 amount = 0; 1366 amount = 0;
1367 rc = put_user(amount, (unsigned int __user *)argp); 1367 rc = put_user(amount, (unsigned int __user *)argp);
@@ -1375,8 +1375,10 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1375 * These two are safe on a single CPU system as 1375 * These two are safe on a single CPU system as
1376 * only user tasks fiddle here 1376 * only user tasks fiddle here
1377 */ 1377 */
1378 lock_sock(sk);
1378 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) 1379 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1379 amount = skb->len; 1380 amount = skb->len;
1381 release_sock(sk);
1380 rc = put_user(amount, (unsigned int __user *)argp); 1382 rc = put_user(amount, (unsigned int __user *)argp);
1381 break; 1383 break;
1382 } 1384 }
@@ -1422,9 +1424,11 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1422 rc = x25_subscr_ioctl(cmd, argp); 1424 rc = x25_subscr_ioctl(cmd, argp);
1423 break; 1425 break;
1424 case SIOCX25GFACILITIES: { 1426 case SIOCX25GFACILITIES: {
1425 struct x25_facilities fac = x25->facilities; 1427 lock_sock(sk);
1426 rc = copy_to_user(argp, &fac, 1428 rc = copy_to_user(argp, &x25->facilities,
1427 sizeof(fac)) ? -EFAULT : 0; 1429 sizeof(x25->facilities))
1430 ? -EFAULT : 0;
1431 release_sock(sk);
1428 break; 1432 break;
1429 } 1433 }
1430 1434
@@ -1435,18 +1439,19 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1435 sizeof(facilities))) 1439 sizeof(facilities)))
1436 break; 1440 break;
1437 rc = -EINVAL; 1441 rc = -EINVAL;
1442 lock_sock(sk);
1438 if (sk->sk_state != TCP_LISTEN && 1443 if (sk->sk_state != TCP_LISTEN &&
1439 sk->sk_state != TCP_CLOSE) 1444 sk->sk_state != TCP_CLOSE)
1440 break; 1445 goto out_fac_release;
1441 if (facilities.pacsize_in < X25_PS16 || 1446 if (facilities.pacsize_in < X25_PS16 ||
1442 facilities.pacsize_in > X25_PS4096) 1447 facilities.pacsize_in > X25_PS4096)
1443 break; 1448 goto out_fac_release;
1444 if (facilities.pacsize_out < X25_PS16 || 1449 if (facilities.pacsize_out < X25_PS16 ||
1445 facilities.pacsize_out > X25_PS4096) 1450 facilities.pacsize_out > X25_PS4096)
1446 break; 1451 goto out_fac_release;
1447 if (facilities.winsize_in < 1 || 1452 if (facilities.winsize_in < 1 ||
1448 facilities.winsize_in > 127) 1453 facilities.winsize_in > 127)
1449 break; 1454 goto out_fac_release;
1450 if (facilities.throughput) { 1455 if (facilities.throughput) {
1451 int out = facilities.throughput & 0xf0; 1456 int out = facilities.throughput & 0xf0;
1452 int in = facilities.throughput & 0x0f; 1457 int in = facilities.throughput & 0x0f;
@@ -1454,24 +1459,28 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1454 facilities.throughput |= 1459 facilities.throughput |=
1455 X25_DEFAULT_THROUGHPUT << 4; 1460 X25_DEFAULT_THROUGHPUT << 4;
1456 else if (out < 0x30 || out > 0xD0) 1461 else if (out < 0x30 || out > 0xD0)
1457 break; 1462 goto out_fac_release;
1458 if (!in) 1463 if (!in)
1459 facilities.throughput |= 1464 facilities.throughput |=
1460 X25_DEFAULT_THROUGHPUT; 1465 X25_DEFAULT_THROUGHPUT;
1461 else if (in < 0x03 || in > 0x0D) 1466 else if (in < 0x03 || in > 0x0D)
1462 break; 1467 goto out_fac_release;
1463 } 1468 }
1464 if (facilities.reverse && 1469 if (facilities.reverse &&
1465 (facilities.reverse & 0x81) != 0x81) 1470 (facilities.reverse & 0x81) != 0x81)
1466 break; 1471 goto out_fac_release;
1467 x25->facilities = facilities; 1472 x25->facilities = facilities;
1468 rc = 0; 1473 rc = 0;
1474out_fac_release:
1475 release_sock(sk);
1469 break; 1476 break;
1470 } 1477 }
1471 1478
1472 case SIOCX25GDTEFACILITIES: { 1479 case SIOCX25GDTEFACILITIES: {
1480 lock_sock(sk);
1473 rc = copy_to_user(argp, &x25->dte_facilities, 1481 rc = copy_to_user(argp, &x25->dte_facilities,
1474 sizeof(x25->dte_facilities)); 1482 sizeof(x25->dte_facilities));
1483 release_sock(sk);
1475 if (rc) 1484 if (rc)
1476 rc = -EFAULT; 1485 rc = -EFAULT;
1477 break; 1486 break;
@@ -1483,26 +1492,31 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1483 if (copy_from_user(&dtefacs, argp, sizeof(dtefacs))) 1492 if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
1484 break; 1493 break;
1485 rc = -EINVAL; 1494 rc = -EINVAL;
1495 lock_sock(sk);
1486 if (sk->sk_state != TCP_LISTEN && 1496 if (sk->sk_state != TCP_LISTEN &&
1487 sk->sk_state != TCP_CLOSE) 1497 sk->sk_state != TCP_CLOSE)
1488 break; 1498 goto out_dtefac_release;
1489 if (dtefacs.calling_len > X25_MAX_AE_LEN) 1499 if (dtefacs.calling_len > X25_MAX_AE_LEN)
1490 break; 1500 goto out_dtefac_release;
1491 if (dtefacs.calling_ae == NULL) 1501 if (dtefacs.calling_ae == NULL)
1492 break; 1502 goto out_dtefac_release;
1493 if (dtefacs.called_len > X25_MAX_AE_LEN) 1503 if (dtefacs.called_len > X25_MAX_AE_LEN)
1494 break; 1504 goto out_dtefac_release;
1495 if (dtefacs.called_ae == NULL) 1505 if (dtefacs.called_ae == NULL)
1496 break; 1506 goto out_dtefac_release;
1497 x25->dte_facilities = dtefacs; 1507 x25->dte_facilities = dtefacs;
1498 rc = 0; 1508 rc = 0;
1509out_dtefac_release:
1510 release_sock(sk);
1499 break; 1511 break;
1500 } 1512 }
1501 1513
1502 case SIOCX25GCALLUSERDATA: { 1514 case SIOCX25GCALLUSERDATA: {
1503 struct x25_calluserdata cud = x25->calluserdata; 1515 lock_sock(sk);
1504 rc = copy_to_user(argp, &cud, 1516 rc = copy_to_user(argp, &x25->calluserdata,
1505 sizeof(cud)) ? -EFAULT : 0; 1517 sizeof(x25->calluserdata))
1518 ? -EFAULT : 0;
1519 release_sock(sk);
1506 break; 1520 break;
1507 } 1521 }
1508 1522
@@ -1516,16 +1530,19 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1516 rc = -EINVAL; 1530 rc = -EINVAL;
1517 if (calluserdata.cudlength > X25_MAX_CUD_LEN) 1531 if (calluserdata.cudlength > X25_MAX_CUD_LEN)
1518 break; 1532 break;
1533 lock_sock(sk);
1519 x25->calluserdata = calluserdata; 1534 x25->calluserdata = calluserdata;
1535 release_sock(sk);
1520 rc = 0; 1536 rc = 0;
1521 break; 1537 break;
1522 } 1538 }
1523 1539
1524 case SIOCX25GCAUSEDIAG: { 1540 case SIOCX25GCAUSEDIAG: {
1525 struct x25_causediag causediag; 1541 lock_sock(sk);
1526 causediag = x25->causediag; 1542 rc = copy_to_user(argp, &x25->causediag,
1527 rc = copy_to_user(argp, &causediag, 1543 sizeof(x25->causediag))
1528 sizeof(causediag)) ? -EFAULT : 0; 1544 ? -EFAULT : 0;
1545 release_sock(sk);
1529 break; 1546 break;
1530 } 1547 }
1531 1548
@@ -1534,7 +1551,9 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1534 rc = -EFAULT; 1551 rc = -EFAULT;
1535 if (copy_from_user(&causediag, argp, sizeof(causediag))) 1552 if (copy_from_user(&causediag, argp, sizeof(causediag)))
1536 break; 1553 break;
1554 lock_sock(sk);
1537 x25->causediag = causediag; 1555 x25->causediag = causediag;
1556 release_sock(sk);
1538 rc = 0; 1557 rc = 0;
1539 break; 1558 break;
1540 1559
@@ -1543,31 +1562,37 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1543 case SIOCX25SCUDMATCHLEN: { 1562 case SIOCX25SCUDMATCHLEN: {
1544 struct x25_subaddr sub_addr; 1563 struct x25_subaddr sub_addr;
1545 rc = -EINVAL; 1564 rc = -EINVAL;
1565 lock_sock(sk);
1546 if(sk->sk_state != TCP_CLOSE) 1566 if(sk->sk_state != TCP_CLOSE)
1547 break; 1567 goto out_cud_release;
1548 rc = -EFAULT; 1568 rc = -EFAULT;
1549 if (copy_from_user(&sub_addr, argp, 1569 if (copy_from_user(&sub_addr, argp,
1550 sizeof(sub_addr))) 1570 sizeof(sub_addr)))
1551 break; 1571 goto out_cud_release;
1552 rc = -EINVAL; 1572 rc = -EINVAL;
1553 if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN) 1573 if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
1554 break; 1574 goto out_cud_release;
1555 x25->cudmatchlength = sub_addr.cudmatchlength; 1575 x25->cudmatchlength = sub_addr.cudmatchlength;
1556 rc = 0; 1576 rc = 0;
1577out_cud_release:
1578 release_sock(sk);
1557 break; 1579 break;
1558 } 1580 }
1559 1581
1560 case SIOCX25CALLACCPTAPPRV: { 1582 case SIOCX25CALLACCPTAPPRV: {
1561 rc = -EINVAL; 1583 rc = -EINVAL;
1584 lock_kernel();
1562 if (sk->sk_state != TCP_CLOSE) 1585 if (sk->sk_state != TCP_CLOSE)
1563 break; 1586 break;
1564 clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); 1587 clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
1588 unlock_kernel();
1565 rc = 0; 1589 rc = 0;
1566 break; 1590 break;
1567 } 1591 }
1568 1592
1569 case SIOCX25SENDCALLACCPT: { 1593 case SIOCX25SENDCALLACCPT: {
1570 rc = -EINVAL; 1594 rc = -EINVAL;
1595 lock_kernel();
1571 if (sk->sk_state != TCP_ESTABLISHED) 1596 if (sk->sk_state != TCP_ESTABLISHED)
1572 break; 1597 break;
1573 /* must call accptapprv above */ 1598 /* must call accptapprv above */
@@ -1575,6 +1600,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1575 break; 1600 break;
1576 x25_write_internal(sk, X25_CALL_ACCEPTED); 1601 x25_write_internal(sk, X25_CALL_ACCEPTED);
1577 x25->state = X25_STATE_3; 1602 x25->state = X25_STATE_3;
1603 unlock_kernel();
1578 rc = 0; 1604 rc = 0;
1579 break; 1605 break;
1580 } 1606 }
@@ -1583,7 +1609,6 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1583 rc = -ENOIOCTLCMD; 1609 rc = -ENOIOCTLCMD;
1584 break; 1610 break;
1585 } 1611 }
1586 unlock_kernel();
1587 1612
1588 return rc; 1613 return rc;
1589} 1614}
@@ -1619,16 +1644,20 @@ static int compat_x25_subscr_ioctl(unsigned int cmd,
1619 dev_put(dev); 1644 dev_put(dev);
1620 1645
1621 if (cmd == SIOCX25GSUBSCRIP) { 1646 if (cmd == SIOCX25GSUBSCRIP) {
1647 read_lock_bh(&x25_neigh_list_lock);
1622 x25_subscr.extended = nb->extended; 1648 x25_subscr.extended = nb->extended;
1623 x25_subscr.global_facil_mask = nb->global_facil_mask; 1649 x25_subscr.global_facil_mask = nb->global_facil_mask;
1650 read_unlock_bh(&x25_neigh_list_lock);
1624 rc = copy_to_user(x25_subscr32, &x25_subscr, 1651 rc = copy_to_user(x25_subscr32, &x25_subscr,
1625 sizeof(*x25_subscr32)) ? -EFAULT : 0; 1652 sizeof(*x25_subscr32)) ? -EFAULT : 0;
1626 } else { 1653 } else {
1627 rc = -EINVAL; 1654 rc = -EINVAL;
1628 if (x25_subscr.extended == 0 || x25_subscr.extended == 1) { 1655 if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
1629 rc = 0; 1656 rc = 0;
1657 write_lock_bh(&x25_neigh_list_lock);
1630 nb->extended = x25_subscr.extended; 1658 nb->extended = x25_subscr.extended;
1631 nb->global_facil_mask = x25_subscr.global_facil_mask; 1659 nb->global_facil_mask = x25_subscr.global_facil_mask;
1660 write_unlock_bh(&x25_neigh_list_lock);
1632 } 1661 }
1633 } 1662 }
1634 x25_neigh_put(nb); 1663 x25_neigh_put(nb);
@@ -1654,19 +1683,15 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
1654 break; 1683 break;
1655 case SIOCGSTAMP: 1684 case SIOCGSTAMP:
1656 rc = -EINVAL; 1685 rc = -EINVAL;
1657 lock_kernel();
1658 if (sk) 1686 if (sk)
1659 rc = compat_sock_get_timestamp(sk, 1687 rc = compat_sock_get_timestamp(sk,
1660 (struct timeval __user*)argp); 1688 (struct timeval __user*)argp);
1661 unlock_kernel();
1662 break; 1689 break;
1663 case SIOCGSTAMPNS: 1690 case SIOCGSTAMPNS:
1664 rc = -EINVAL; 1691 rc = -EINVAL;
1665 lock_kernel();
1666 if (sk) 1692 if (sk)
1667 rc = compat_sock_get_timestampns(sk, 1693 rc = compat_sock_get_timestampns(sk,
1668 (struct timespec __user*)argp); 1694 (struct timespec __user*)argp);
1669 unlock_kernel();
1670 break; 1695 break;
1671 case SIOCGIFADDR: 1696 case SIOCGIFADDR:
1672 case SIOCSIFADDR: 1697 case SIOCSIFADDR:
@@ -1685,22 +1710,16 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
1685 rc = -EPERM; 1710 rc = -EPERM;
1686 if (!capable(CAP_NET_ADMIN)) 1711 if (!capable(CAP_NET_ADMIN))
1687 break; 1712 break;
1688 lock_kernel();
1689 rc = x25_route_ioctl(cmd, argp); 1713 rc = x25_route_ioctl(cmd, argp);
1690 unlock_kernel();
1691 break; 1714 break;
1692 case SIOCX25GSUBSCRIP: 1715 case SIOCX25GSUBSCRIP:
1693 lock_kernel();
1694 rc = compat_x25_subscr_ioctl(cmd, argp); 1716 rc = compat_x25_subscr_ioctl(cmd, argp);
1695 unlock_kernel();
1696 break; 1717 break;
1697 case SIOCX25SSUBSCRIP: 1718 case SIOCX25SSUBSCRIP:
1698 rc = -EPERM; 1719 rc = -EPERM;
1699 if (!capable(CAP_NET_ADMIN)) 1720 if (!capable(CAP_NET_ADMIN))
1700 break; 1721 break;
1701 lock_kernel();
1702 rc = compat_x25_subscr_ioctl(cmd, argp); 1722 rc = compat_x25_subscr_ioctl(cmd, argp);
1703 unlock_kernel();
1704 break; 1723 break;
1705 case SIOCX25GFACILITIES: 1724 case SIOCX25GFACILITIES:
1706 case SIOCX25SFACILITIES: 1725 case SIOCX25SFACILITIES:
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index b25c6463c3e..4cbc942f762 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -31,8 +31,8 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <net/x25.h> 32#include <net/x25.h>
33 33
34static LIST_HEAD(x25_neigh_list); 34LIST_HEAD(x25_neigh_list);
35static DEFINE_RWLOCK(x25_neigh_list_lock); 35DEFINE_RWLOCK(x25_neigh_list_lock);
36 36
37static void x25_t20timer_expiry(unsigned long); 37static void x25_t20timer_expiry(unsigned long);
38 38
@@ -360,16 +360,20 @@ int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
360 dev_put(dev); 360 dev_put(dev);
361 361
362 if (cmd == SIOCX25GSUBSCRIP) { 362 if (cmd == SIOCX25GSUBSCRIP) {
363 read_lock_bh(&x25_neigh_list_lock);
363 x25_subscr.extended = nb->extended; 364 x25_subscr.extended = nb->extended;
364 x25_subscr.global_facil_mask = nb->global_facil_mask; 365 x25_subscr.global_facil_mask = nb->global_facil_mask;
366 read_unlock_bh(&x25_neigh_list_lock);
365 rc = copy_to_user(arg, &x25_subscr, 367 rc = copy_to_user(arg, &x25_subscr,
366 sizeof(x25_subscr)) ? -EFAULT : 0; 368 sizeof(x25_subscr)) ? -EFAULT : 0;
367 } else { 369 } else {
368 rc = -EINVAL; 370 rc = -EINVAL;
369 if (!(x25_subscr.extended && x25_subscr.extended != 1)) { 371 if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
370 rc = 0; 372 rc = 0;
373 write_lock_bh(&x25_neigh_list_lock);
371 nb->extended = x25_subscr.extended; 374 nb->extended = x25_subscr.extended;
372 nb->global_facil_mask = x25_subscr.global_facil_mask; 375 nb->global_facil_mask = x25_subscr.global_facil_mask;
376 write_unlock_bh(&x25_neigh_list_lock);
373 } 377 }
374 } 378 }
375 x25_neigh_put(nb); 379 x25_neigh_put(nb);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index d9154cf90ae..156ef93d6f7 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4524,11 +4524,11 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
4524 if (selinux_secmark_enabled()) 4524 if (selinux_secmark_enabled())
4525 if (avc_has_perm(sksec->sid, skb->secmark, 4525 if (avc_has_perm(sksec->sid, skb->secmark,
4526 SECCLASS_PACKET, PACKET__SEND, &ad)) 4526 SECCLASS_PACKET, PACKET__SEND, &ad))
4527 return NF_DROP; 4527 return NF_DROP_ERR(-ECONNREFUSED);
4528 4528
4529 if (selinux_policycap_netpeer) 4529 if (selinux_policycap_netpeer)
4530 if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto)) 4530 if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto))
4531 return NF_DROP; 4531 return NF_DROP_ERR(-ECONNREFUSED);
4532 4532
4533 return NF_ACCEPT; 4533 return NF_ACCEPT;
4534} 4534}
@@ -4585,7 +4585,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4585 secmark_perm = PACKET__SEND; 4585 secmark_perm = PACKET__SEND;
4586 break; 4586 break;
4587 default: 4587 default:
4588 return NF_DROP; 4588 return NF_DROP_ERR(-ECONNREFUSED);
4589 } 4589 }
4590 if (secmark_perm == PACKET__FORWARD_OUT) { 4590 if (secmark_perm == PACKET__FORWARD_OUT) {
4591 if (selinux_skb_peerlbl_sid(skb, family, &peer_sid)) 4591 if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
@@ -4607,7 +4607,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4607 if (secmark_active) 4607 if (secmark_active)
4608 if (avc_has_perm(peer_sid, skb->secmark, 4608 if (avc_has_perm(peer_sid, skb->secmark,
4609 SECCLASS_PACKET, secmark_perm, &ad)) 4609 SECCLASS_PACKET, secmark_perm, &ad))
4610 return NF_DROP; 4610 return NF_DROP_ERR(-ECONNREFUSED);
4611 4611
4612 if (peerlbl_active) { 4612 if (peerlbl_active) {
4613 u32 if_sid; 4613 u32 if_sid;
@@ -4617,13 +4617,13 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4617 return NF_DROP; 4617 return NF_DROP;
4618 if (avc_has_perm(peer_sid, if_sid, 4618 if (avc_has_perm(peer_sid, if_sid,
4619 SECCLASS_NETIF, NETIF__EGRESS, &ad)) 4619 SECCLASS_NETIF, NETIF__EGRESS, &ad))
4620 return NF_DROP; 4620 return NF_DROP_ERR(-ECONNREFUSED);
4621 4621
4622 if (sel_netnode_sid(addrp, family, &node_sid)) 4622 if (sel_netnode_sid(addrp, family, &node_sid))
4623 return NF_DROP; 4623 return NF_DROP;
4624 if (avc_has_perm(peer_sid, node_sid, 4624 if (avc_has_perm(peer_sid, node_sid,
4625 SECCLASS_NODE, NODE__SENDTO, &ad)) 4625 SECCLASS_NODE, NODE__SENDTO, &ad))
4626 return NF_DROP; 4626 return NF_DROP_ERR(-ECONNREFUSED);
4627 } 4627 }
4628 4628
4629 return NF_ACCEPT; 4629 return NF_ACCEPT;